comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Why is this the case? | public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(DockerImage.fromString(dockerImageRepo))), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(dockerImageRepo, host1.dockerImageRepo().get());
} | assertFalse(host1.version().isPresent()); | public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3"));
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(dockerImage)), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
node1 = tester.nodeRepository().getNode(host1.hostname()).get();
assertEquals(dockerImage, node1.status().dockerImage().get());
} | class ProvisioningTest {
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
@Test
@Test
public void application_deployment_variable_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(24, defaultResources);
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
state2.assertExtends(state1);
assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state2.allHosts);
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state3.allHosts);
assertEquals("Superfluous container nodes are deactivated",
3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Earlier retired nodes are not unretired before activate",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
state4.assertExtends(state2);
assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
tester.activate(application1, state4.allHosts);
assertEquals("Retired active removed when activating became inactive",
1, tester.getNodes(application1, Node.State.inactive).asList().size());
assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
assertEquals("Earlier retired nodes are unretired on activate",
0, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state5.allHosts);
assertEquals("Superfluous container nodes are also deactivated",
4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals("One content node is unretired",
5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state7 = prepare(application1, 8, 2, 2, 2, defaultResources, tester);
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(5, defaultResources);
SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state1.allHosts);
try {
SystemState state2 = prepare(application1, 3, 0, 3, 0, defaultResources, tester);
fail("Expected out of capacity exception");
}
catch (OutOfCapacityException expected) {
}
SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
@Test
public void deploy_specific_vespa_version() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
String dockerImageRepo = "docker.domain.tld/my/image";
SystemState state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void test_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3,
new NodeResources(2, 4, 10, 2), tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
NodeResources large = new NodeResources(2, 4, 10, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, large);
SystemState state = prepare(application, 2, 2, 3, 3, large, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void staging_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
assertEquals(9, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void activate_after_reservation_timeout() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
@Test
public void out_of_capacity() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(9, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 3, 3, defaultResources, tester);
fail("Expected exception");
}
catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
@Test
public void no_out_of_quota_outside_public() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.main,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
prepare(application, 2, 2, 6, 3, defaultResources, tester);
}
@Test
public void out_of_capacity_but_cannot_fail() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources);
ApplicationId application = tester.makeApplicationId();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void want_to_retire_but_cannot_fail() {
Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
Capacity capacityFORCED = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
tester.activate(application, tester.prepare(application, cluster, capacityFORCED, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
public void highest_node_indexes_are_retired_first() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
fail("Expected exception");
} catch (IllegalArgumentException ignored) {}
}
@Test
public void devsystem_application_deployment_on_devhost() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources flavor, ProvisioningTester tester) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42");
}
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources nodeResources, String wantedVersion) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, false, nodeResources,
wantedVersion, Optional.empty());
}
/**
 * Prepares a system of two container clusters and two content clusters on the wanted version,
 * then asserts that the number of unretired nodes per cluster matches what the zone's capacity
 * policies decide, and that cluster memberships are consistent.
 *
 * NOTE(review): the dockerImageRepo parameter is never used in this body — confirm whether it
 * should be applied when building the cluster specs.
 *
 * @return the prepared hosts, grouped per cluster
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
// Four fixed clusters: container0/1 and content0/1, all on the same wanted version
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// The requested sizes may be adjusted by the zone's capacity policies, so compute the expected
// sizes the same way the provisioner does
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster and returns its host specs as a set; empty when no nodes are requested. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    if (nodeCount == 0) return Collections.emptySet(); // skip clusters the caller disabled
    var preparedHosts = tester.prepare(application, cluster, nodeCount, groups, required, nodeResources);
    return new HashSet<>(preparedHosts);
}
/**
 * The host sets returned from preparing the four standard clusters, with convenience
 * accessors and assertions over them.
 */
private static class SystemState {

    // Fields are final: each set is assigned once in the constructor.
    // (allHosts remains a mutable set instance: removeHost() mutates it.)
    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    // Fix: parameter names were previously shifted by one (container1/container2),
    // which obscured which field each argument initializes
    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    /** A host with no group is treated as belonging to group 0 */
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group == 0;
        return clusterGroup.get().index() == group;
    }

    /** Returns the hostnames of all hosts in this */
    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes and returns the host with the given hostname from allHosts, or null if not present */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this contains all hosts of the other state, with unchanged memberships */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension,
                               Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts that all five host sets of this and the other state are equal */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }

}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // history().event(type) already looks up the 'retired' event, so the original
    // extra filter on e.type() was redundant and has been removed
    return (node) -> node.history().event(History.Event.Type.retired)
                                   .filter(e -> e.agent() == agent)
                                   .isPresent();
}
} | class ProvisioningTest {
// Default node resources used by most tests: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
// Deploys, redeploys, removes and restarts applications of constant size and verifies
// node allocation is stable and hosts of different applications stay disjunct.
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
// deploy
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// redeploy: same allocation as before
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
// deploy a second application on other hosts
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
// prepare twice without activating in between: allocation is still stable
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
// remove a host from the prepared set before activating: that node becomes inactive
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
// deploy with two clusters removed: their nodes are deactivated
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
// remove the application entirely: all its nodes become inactive
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
// the other application is unaffected
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
// fail a node of application2: a replacement is reserved on the next prepare
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
// redeploying application1 gives back its original (now inactive) nodes
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
// restart with various host filters and verify the restart counts
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
// Verifies that a HostSpec has no version until one is recorded in the node repository,
// after which the recorded version is returned on redeploy.
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// no version is set on the freshly deployed host
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
// record a Vespa version on the node in the repository
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
// redeploy: the recorded version is now returned
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
// Fix: the method carried two @Test annotations, which is a compile error
// (@Test is not a repeatable annotation); one has been removed.
@Test
// Grows and shrinks an application and verifies that container nodes are deactivated,
// content nodes are retired/unretired, and inactive nodes are reused.
public void application_deployment_variable_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(24, defaultResources);
// deploy
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// redeploy with increased sizes
SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
state2.assertExtends(state1);
assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state2.allHosts);
// decrease again
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state3.allHosts);
assertEquals("Superfluous container nodes are deactivated",
3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
// increase even further
SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Earlier retired nodes are not unretired before activate",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
state4.assertExtends(state2);
assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
// remove a retired host from the prepared set before activating
HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
tester.activate(application1, state4.allHosts);
assertEquals("Retired active removed when activating became inactive",
1, tester.getNodes(application1, Node.State.inactive).asList().size());
assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
assertEquals("Earlier retired nodes are unretired on activate",
0, tester.getNodes(application1, Node.State.active).retired().size());
// decrease again
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state5.allHosts);
assertEquals("Superfluous container nodes are also deactivated",
4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
// increase content slightly
SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals("One content node is unretired",
5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
// prepare (without activating), then remove the application entirely
// (fix: the prepared state was assigned to an unused local; the assignment is dropped)
prepare(application1, 8, 2, 2, 2, defaultResources, tester);
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
// Deploys on small nodes, then redeploys on large nodes and verifies the entire
// cluster is swapped: small nodes are retired, large ones are not.
public void application_deployment_multiple_flavors() {
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
// deploy and redeploy on the small resources
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
// redeploy with the large resources: everything is reallocated
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
// Deploys exactly at capacity, verifies that a larger request throws OutOfCapacityException,
// and that the original size can still be deployed afterwards.
public void application_deployment_above_then_at_capacity_limit() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(5, defaultResources);
// deploy exactly at capacity
SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state1.allHosts);
try {
// fix: the prepared state was assigned to an unused local; call prepare directly
prepare(application1, 3, 0, 3, 0, defaultResources, tester);
fail("Expected out of capacity exception");
}
catch (OutOfCapacityException expected) {
}
// the same size still deploys
SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state3.allHosts);
}
/** In dev, a request for 2+2+3+3 nodes is reduced to 4 hosts in total. */
@Test
public void dev_deployment_node_size() {
var zone = new Zone(Environment.dev, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId app = tester.makeApplicationId();
SystemState prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
@Test
// Verifies that the originally requested resources (including an unspecified disk speed)
// are retained on both the returned host specs and the stored node allocations.
public void requested_resources_info_is_retained() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
// request resources without disk speed: 'any' is retained
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
// request the full default resources: 'fast' disk speed is retained
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
/** Preparing with an explicit wanted Vespa version succeeds in dev. */
@Test
public void deploy_specific_vespa_version() {
var zone = new Zone(Environment.dev, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId app = tester.makeApplicationId();
SystemState prepared = prepare(app, tester, 2, 2, 3, 3, defaultResources, "6.91");
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
/** Preparing with both an explicit Vespa version and a docker image repo succeeds in dev. */
@Test
public void deploy_specific_vespa_version_and_docker_image() {
var zone = new Zone(Environment.dev, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId app = tester.makeApplicationId();
String dockerImageRepo = "docker.domain.tld/my/image";
SystemState prepared = prepare(app, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
/** In the test environment, a request for 2+2+3+3 nodes is reduced to 4 hosts in total. */
@Test
public void test_deployment_size() {
var zone = new Zone(Environment.test, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
ApplicationId app = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
SystemState prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
/** A prod deployment containing a cluster of size 1 is rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
var zone = new Zone(Environment.prod, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
ApplicationId app = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
prepare(app, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
var resources = new NodeResources(2, 4, 10, 2);
var zone = new Zone(Environment.dev, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, resources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId app = tester.makeApplicationId();
SystemState prepared = prepare(app, 2, 2, 3, 3, resources, tester);
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
var requested = new NodeResources(2, 4, 10, 0.3);
var zone = new Zone(Environment.test, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
ApplicationId app = tester.makeApplicationId();
tester.makeReadyNodes(4, requested);
SystemState prepared = prepare(app, 2, 2, 3, 3, requested, tester);
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
/** In staging, a request for 1+1+1+64 nodes is reduced to 9 hosts in total. */
@Test
public void staging_deployment_size() {
var zone = new Zone(Environment.staging, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
ApplicationId app = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState prepared = prepare(app, 1, 1, 1, 64, defaultResources, tester);
assertEquals(9, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
@Test
// Verifies that activating hosts after the application has been deactivated fails
// with an explanatory IllegalArgumentException.
public void activate_after_reservation_timeout() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// deactivate the application before activating the prepared hosts
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
@Test
// Requesting 10 nodes when only 9 are ready throws OutOfCapacityException.
public void out_of_capacity() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(9, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 3, 3, defaultResources, tester);
fail("Expected exception");
}
catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
/** The same request that exceeds quota in a public system (see out_of_quota) succeeds in the main system. */
@Test
public void no_out_of_quota_outside_public() {
var zone = new Zone(SystemName.main, Environment.prod, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId app = tester.makeApplicationId();
prepare(app, 2, 2, 6, 3, defaultResources, tester);
}
/** Requests 5 nodes with only 4 available; with this capacity settings prepare does not throw. */
@Test
public void out_of_capacity_but_cannot_fail() {
var zone = new Zone(Environment.prod, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, defaultResources);
ApplicationId app = tester.makeApplicationId();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
Capacity capacity = Capacity.fromCount(5, Optional.empty(), false, false);
tester.prepare(app, cluster, capacity, 1);
}
@Test
// When every ready node wants to retire, a prepare that needs them throws OutOfCapacityException.
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
// mark all ready nodes as wanting to retire
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
// Nodes that want to retire are only actually retired when the deployment is allowed to fail;
// with canFail=false (capacityFORCED) the allocation is kept as-is.
public void want_to_retire_but_cannot_fail() {
Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
Capacity capacityFORCED = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
// deploy: no nodes are retired
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
// mark all active nodes as wanting to retire, redeploy with canFail=false: nothing is retired
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
tester.activate(application, tester.prepare(application, cluster, capacityFORCED, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
// redeploy with canFail=true: the wanting-to-retire nodes are now retired
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
// When shrinking content clusters from 4 to 2 nodes, the nodes with the highest
// membership indexes (2 and 3) are the ones retired.
public void highest_node_indexes_are_retired_first() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
// deploy, then shrink all clusters
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
// indexes 0 and 1 survive; indexes 2 and 3 are retired
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
@Test
// Active nodes marked wantToRetire are retired (by the system agent) on the next deployment.
public void application_deployment_retires_nodes_that_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{
// initial deployment
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{
// mark two nodes for retirement and redeploy
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
@Test
// A partial reservation left by a failed prepare is reused and its reservation timestamps
// are refreshed by a later prepare, so the ReservationExpirer does not reclaim the nodes.
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
// prepare fails, but the 2 available nodes are left reserved
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// make up for the missing nodes and retry
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// advance beyond the original reservation window: nodes stay reserved because
// their reservation events were refreshed by the second prepare
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
/** Even with required capacity, single-node clusters are rejected in prod. */
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
var zone = new Zone(Environment.prod, RegionName.from("us-east"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
ApplicationId app = tester.makeApplicationId();
try {
prepare(app, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
fail("Expected exception");
}
catch (IllegalArgumentException ignored) {}
}
/** In the dev system, deployments are placed on devhost nodes. */
@Test
public void devsystem_application_deployment_on_devhost() {
var zone = new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
ApplicationId app = tester.makeApplicationId();
SystemState prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, prepared.allHosts.size());
tester.activate(app, prepared.allHosts);
}
@Test
// Preparing again with a new wanted version updates the cluster spec of nodes that
// are already reserved (without activating in between).
public void cluster_spec_update_for_already_reserved_nodes() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
// first prepare reserves nodes on version1
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
// second prepare moves the still-reserved nodes to version2
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
// Deploy as a content cluster
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
// Redeploy as a combined cluster: same nodes, only the cluster type changes
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
// Redeploy back as a content cluster: again the same nodes, with the type changed back
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Prepares the four standard clusters on the default wanted version, 6.42. */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources flavor, ProvisioningTester tester) {
    return prepare(application, tester,
                   container0Size, container1Size, content0Size, content1Size,
                   flavor, "6.42");
}
/** Prepares the four standard clusters on the given wanted version, without the 'required' flag and without a docker image repo. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources nodeResources, String wantedVersion) {
    return prepare(application, tester,
                   container0Size, container1Size, content0Size, content1Size,
                   false, nodeResources, wantedVersion, Optional.empty());
}
// Prepares two container and two content clusters of the given sizes and verifies the resulting host sets:
// clusters are disjoint, each has the expected (policy-adjusted) number of unretired nodes, and memberships match.
// NOTE(review): dockerImageRepo is accepted but not used in this body — verify whether it should be passed on.
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// Expected sizes go through the capacity policies, since zones (dev/test/staging) may adjust requested counts.
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster and returns its hosts as a set; a zero-size cluster yields the empty set. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    return nodeCount == 0
           ? Collections.emptySet()
           : new HashSet<>(tester.prepare(application, cluster, nodeCount, groups, required, nodeResources));
}
/**
 * The host sets produced by one prepare call: all hosts, plus the subset belonging to each
 * of the four standard clusters. Provides assertion helpers for comparing the outcomes of
 * successive prepare calls.
 */
private static class SystemState {

    // Fields made final; constructor parameter names fixed to match the fields they are
    // assigned to (previously 'container1'/'container2' were assigned to container0/container1).
    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    /** A missing group matches group index 0. */
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group == 0;
        return clusterGroup.get().index() == group;
    }

    /** Returns the hostnames of all hosts in this. */
    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes and returns the host with the given hostname, or null if it is not in this. */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this contains all hosts of the other state, with unchanged cluster memberships. */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension,
                               Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts that this and the other state contain exactly the same hosts, cluster by cluster. */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }

}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // history().event(Type.retired) already selects the retired event, so the
    // previous extra filter on e.type() == retired was redundant and is removed.
    return (node) -> node.history().event(History.Event.Type.retired)
                         .filter(e -> e.agent() == agent)
                         .isPresent();
}
} |
This actually shouldn't be allowed, current docker image should always include the version as well. | public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(DockerImage.fromString(dockerImageRepo))), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(dockerImageRepo, host1.dockerImageRepo().get());
} | tester.nodeRepository().write(node1.with(node1.status().withDockerImage(DockerImage.fromString(dockerImageRepo))), () -> {}); | public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3"));
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(dockerImage)), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
node1 = tester.nodeRepository().getNode(host1.hostname()).get();
assertEquals(dockerImage, node1.status().dockerImage().get());
} | class ProvisioningTest {
// Default node resources used throughout: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth.
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
// Initial deployment
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// Redeploy with the same sizes: the exact same hosts come back
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
// A second application gets hosts disjoint from the first
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
// Preparing twice without activating in between is stable
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
// Removing a host from the prepared set before activation makes it inactive
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
// Dropping the container0 and content0 clusters deactivates their nodes
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
// Removing application1 entirely moves all its nodes to inactive
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
// Failing a node of application2: a fresh node replaces it on the next prepare
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
// Redeploying application1 reuses its previous, now inactive, hosts
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
// Restart with various host filters and verify the restart counts
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
// The host spec carries no version yet: the node has not reported a running Vespa version.
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
// Simulate the node reporting its running version ...
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
// ... after which a new prepare returns that version on the host spec.
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
// Fix: the annotation was duplicated (@Test twice) — @Test is not repeatable, so this
// did not compile. Also: 'state7' was assigned but never used; the call is kept for its
// side effect (preparing before removal) without the dead local.
@Test
public void application_deployment_variable_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(24, defaultResources);
// Initial deployment
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// Grow all clusters: new nodes are added, existing allocation is kept
SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
state2.assertExtends(state1);
assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state2.allHosts);
// Shrink back: container nodes are deactivated, content nodes are retired
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state3.allHosts);
assertEquals("Superfluous container nodes are deactivated",
3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
// Grow even further, removing one retired host from the prepared set before activation
SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Earlier retired nodes are not unretired before activate",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
state4.assertExtends(state2);
assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
tester.activate(application1, state4.allHosts);
assertEquals("Retired active removed when activating became inactive",
1, tester.getNodes(application1, Node.State.inactive).asList().size());
assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
assertEquals("Earlier retired nodes are unretired on activate",
0, tester.getNodes(application1, Node.State.active).retired().size());
// Shrink again
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state5.allHosts);
assertEquals("Superfluous container nodes are also deactivated",
4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
// Growing a content cluster by one unretires one node
SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals("One content node is unretired",
5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
// Prepare (without activating), then remove the application: nothing stays active or reserved
prepare(application1, 8, 2, 2, 2, defaultResources, tester);
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
// Deploy and shrink on the small flavor
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
// Redeploy on the large flavor: the whole allocation is swapped to new nodes
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(5, defaultResources);

    // Deploy within capacity.
    SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state1.allHosts);

    // Ask for one node more than exists: prepare must throw.
    // (The previously assigned-but-unused local 'state2' is removed; the call itself is the point.)
    try {
        prepare(application1, 3, 0, 3, 0, defaultResources, tester);
        fail("Expected out of capacity exception");
    }
    catch (OutOfCapacityException expected) {
    }

    // Deploying exactly at capacity still works.
    SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
    // Dev reduces the requested 2+2+3+3 nodes to one node per cluster, 4 hosts in total.
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.dev, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);

    var application = tester.makeApplicationId();
    var state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
// Requesting just numbers (no disk-speed preference) records diskSpeed 'any' on both host specs and nodes
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
// Requesting the full default resources records diskSpeed 'fast' instead
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
@Test
public void deploy_specific_vespa_version() {
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.dev, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);

    // Prepare on an explicitly given wanted version instead of the default.
    var application = tester.makeApplicationId();
    var state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.dev, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);

    // Prepare with both an explicit wanted version and an explicit docker image repo.
    var application = tester.makeApplicationId();
    String dockerImageRepo = "docker.domain.tld/my/image";
    var state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void test_deployment_size() {
    // Test zones reduce the requested 2+2+3+3 nodes to 4 hosts in total.
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.test, RegionName.from("us-east")))
            .build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);
    var state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
// container0 size 1 violates the prod redundancy requirement and must throw
prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
// Requested sizes are reduced to 4 hosts on the zone's flavor
SystemState state = prepare(application, 2, 2, 3, 3,
new NodeResources(2, 4, 10, 2), tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
    // Renamed the misleading local 'large' — these are simply the requested resources.
    NodeResources requestedResources = new NodeResources(2, 4, 10, 0.3);
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.test, RegionName.from("us-east")))
            .build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(4, requestedResources);
    var state = prepare(application, 2, 2, 3, 3, requestedResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void staging_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
// Staging caps the requested cluster sizes: 1+1+1+64 requested nodes become 9 hosts in total
// NOTE(review): the exact cap is inferred from the assertion below — confirm against CapacityPolicies.
SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
assertEquals(9, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void activate_after_reservation_timeout() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// Deactivate between prepare and activate (simulating the reservation being released) ...
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
// ... which makes the subsequent activate fail
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
@Test
public void out_of_capacity() {
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.prod, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(9, defaultResources); // one node short of the 10 requested below
    var application = tester.makeApplicationId();
    try {
        prepare(application, 2, 2, 3, 3, defaultResources, tester);
        fail("Expected exception");
    }
    catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
/** The same request that trips the quota in a public system must succeed in system 'main'. */
@Test
public void no_out_of_quota_outside_public() {
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(SystemName.main, Environment.prod, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(13, defaultResources);
    var application = tester.makeApplicationId();
    prepare(application, 2, 2, 6, 3, defaultResources, tester); // must not throw
}
@Test
public void out_of_capacity_but_cannot_fail() {
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.prod, RegionName.from("us-east")))
            .build();
    tester.makeReadyNodes(4, defaultResources);
    var application = tester.makeApplicationId();
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    // canFail=false: requesting 5 nodes with only 4 available must not throw.
    tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
// Mark every ready node as wanting to retire: they must not be allocated, so prepare runs out of capacity
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void want_to_retire_but_cannot_fail() {
    // Renamed 'capacityFORCED' to follow lowerCamelCase; behavior unchanged.
    Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
    Capacity forcedCapacity = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
    var tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.prod, RegionName.from("us-east")))
            .build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();

    // Normal deployment: 5 active nodes, none retired.
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());

    // All nodes want to retire, but redeploying without permission to fail retires nothing.
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    tester.activate(application, tester.prepare(application, cluster, forcedCapacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));

    // With permission to fail, the 5 old nodes are retired and 5 replacements activated.
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
public void highest_node_indexes_are_retired_first() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var application1 = tester.makeApplicationId();
    tester.makeReadyNodes(14, defaultResources);

    // Deploy with 4 content nodes per cluster, then shrink to 2 per cluster
    SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
    tester.activate(application1, state2.allHosts);

    // In both content clusters the lowest indexes survive; indexes 2 and 3 are retired
    for (String clusterId : List.of("content0", "content1")) {
        assertFalse(state2.hostByMembership(clusterId, 0, 0).membership().get().retired());
        assertFalse(state2.hostByMembership(clusterId, 0, 1).membership().get().retired());
        assertTrue(state2.hostByMembership(clusterId, 0, 2).membership().get().retired());
        assertTrue(state2.hostByMembership(clusterId, 0, 3).membership().get().retired());
    }
}
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);

    // Initial deployment: 4 active nodes
    SystemState initialState = prepare(application, 2, 0, 2, 0, defaultResources, tester);
    tester.activate(application, initialState.allHosts);
    assertEquals(4, tester.getNodes(application, Node.State.active).size());

    // Flag two active nodes; the redeployment must retire exactly those, attributed to the system agent
    for (Node node : tester.getNodes(application, Node.State.active).asList().subList(0, 2))
        tester.patchNode(node.with(node.status().withWantToRetire(true)));
    SystemState redeployedState = prepare(application, 2, 0, 2, 0, defaultResources, tester);
    tester.activate(application, redeployedState.allHosts);

    List<Node> retiredNodes = tester.getNodes(application).retired().asList();
    assertEquals(2, retiredNodes.size());
    assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
@Test
// Verifies that a partial reservation left behind by a failed prepare is reused and
// its reservation timestamp refreshed when the application is prepared again.
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
// Only 2 of the 4 required nodes exist, so prepare fails but keeps what it reserved
tester.makeReadyNodes(2, defaultResources);
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// Add the missing capacity and prepare again after some time has passed
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
// The earlier reservations must have been re-stamped to the current clock time
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// 2m + 8m 1s elapsed since the first reservation, but only 2m 0s since the refresh,
// so a 10-minute expirer must NOT release the (refreshed) reservations
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
    // Even a 'required' request must honor the prod redundancy minimum:
    // single-node clusters are rejected before any capacity check.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var application = tester.makeApplicationId();
    try {
        prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
        fail("Expected exception");
    } catch (IllegalArgumentException ignored) {}
}
@Test
public void devsystem_application_deployment_on_devhost() {
    var zone = new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"));
    var tester = new ProvisioningTester.Builder().zone(zone).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);

    var application = tester.makeApplicationId();
    // The dev environment collapses the requested layout onto the 4 devhosts
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(2, defaultResources);

    // Preparing again with a new wanted version must update the version on the
    // already-reserved nodes rather than reserving new ones.
    for (String version : List.of("6.42", "6.43")) {
        prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version, Optional.empty());
        tester.getNodes(application, Node.State.reserved).forEach(node ->
                assertEquals(Version.fromString(version), node.allocation().get().membership().cluster().vespaVersion()));
    }
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);

    var twoNodes = Capacity.fromCount(2, defaultResources, false, false);
    var contentSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
    var combinedSpec = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();

    // Initial deployment as a content cluster
    var initialNodes = tester.activate(application, tester.prepare(application, contentSpec, twoNodes, 1));

    // content -> combined: identical nodes, cluster type switched
    var newNodes = tester.activate(application, tester.prepare(application, combinedSpec, twoNodes, 1));
    assertEquals("Node allocation remains the same", initialNodes, newNodes);
    assertEquals("Cluster type is updated",
                 Set.of(ClusterSpec.Type.combined),
                 newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));

    // combined -> content: identical nodes, cluster type switched back
    newNodes = tester.activate(application, tester.prepare(application, contentSpec, twoNodes, 1));
    assertEquals("Node allocation remains the same", initialNodes, newNodes);
    assertEquals("Cluster type is updated",
                 Set.of(ClusterSpec.Type.content),
                 newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Prepares the four standard clusters using the default wanted Vespa version "6.42". */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources flavor, ProvisioningTester tester) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42");
}
/** Prepares the four standard clusters at the given wanted version, not required and with no docker image override. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources nodeResources, String wantedVersion) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, false, nodeResources,
wantedVersion, Optional.empty());
}
/**
 * Prepares two container and two content clusters of the given sizes, verifies the
 * resulting host sets against the sizes decided by the capacity policies, and
 * returns them as a SystemState.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
    // The four standard cluster specs used throughout these tests
    ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
    ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
    ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
    ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();

    Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
    Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
    Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
    Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);

    Set<HostSpec> allHosts = new HashSet<>();
    for (Set<HostSpec> cluster : List.of(container0, container1, content0, content1))
        allHosts.addAll(cluster);

    // The sizes actually allocated may differ from those requested, per the zone's capacity policies
    int expectedContainer0Size = tester.capacityPolicies().decideSize(Capacity.fromCount(container0Size, Optional.empty(), required, true), containerCluster0, application);
    int expectedContainer1Size = tester.capacityPolicies().decideSize(Capacity.fromCount(container1Size, Optional.empty(), required, true), containerCluster1, application);
    int expectedContent0Size = tester.capacityPolicies().decideSize(Capacity.fromCount(content0Size, Optional.empty(), required, true), contentCluster0, application);
    int expectedContent1Size = tester.capacityPolicies().decideSize(Capacity.fromCount(content1Size, Optional.empty(), required, true), contentCluster1, application);

    assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
                 expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
                 tester.nonRetired(allHosts).size());
    assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
    assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
    assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
    assertEquals(expectedContent1Size, tester.nonRetired(content1).size());

    tester.assertMembersOf(containerCluster0, container0);
    tester.assertMembersOf(containerCluster1, container1);
    tester.assertMembersOf(contentCluster0, content0);
    tester.assertMembersOf(contentCluster1, content1);

    return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster and collects the returned host specs into a set; a size of 0 means the cluster is absent. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    if (nodeCount == 0) return Collections.emptySet(); // this cluster is not part of the layout
    var hosts = tester.prepare(application, cluster, nodeCount, groups, required, nodeResources);
    return new HashSet<>(hosts);
}
/**
 * Captures the host specs produced by preparing the four standard clusters,
 * with the lookup and comparison helpers used by the assertions in this test.
 */
private static class SystemState {

    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    // Parameters are named after the fields they populate (the original names
    // container1/container2 for the container0/container1 fields were misleading)
    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    /** A host without an assigned group matches group 0 */
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group == 0;
        return clusterGroup.get().index() == group;
    }

    /** Returns the hostnames of all hosts in this */
    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes and returns the host with the given hostname, or null if not present */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this state contains all hosts of other, with unchanged cluster memberships */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension,
                               Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts that all five host sets of this equal those of other */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }

}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // history().event(Type.retired) already selects the retired event, so the
    // former additional e.type() == retired filter was redundant and is removed
    return node -> node.history().event(History.Event.Type.retired)
                                 .filter(e -> e.agent() == agent)
                                 .isPresent();
}
} | class ProvisioningTest {
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
@Test
@Test
public void application_deployment_variable_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(24, defaultResources);
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
state2.assertExtends(state1);
assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state2.allHosts);
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state3.allHosts);
assertEquals("Superfluous container nodes are deactivated",
3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Earlier retired nodes are not unretired before activate",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
state4.assertExtends(state2);
assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
tester.activate(application1, state4.allHosts);
assertEquals("Retired active removed when activating became inactive",
1, tester.getNodes(application1, Node.State.inactive).asList().size());
assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
assertEquals("Earlier retired nodes are unretired on activate",
0, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state5.allHosts);
assertEquals("Superfluous container nodes are also deactivated",
4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
assertEquals("Superfluous content nodes are retired",
5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals("One content node is unretired",
5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
SystemState state7 = prepare(application1, 8, 2, 2, 2, defaultResources, tester);
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(5, defaultResources);
SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state1.allHosts);
try {
SystemState state2 = prepare(application1, 3, 0, 3, 0, defaultResources, tester);
fail("Expected out of capacity exception");
}
catch (OutOfCapacityException expected) {
}
SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
@Test
public void deploy_specific_vespa_version() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
String dockerImageRepo = "docker.domain.tld/my/image";
SystemState state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void test_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3,
new NodeResources(2, 4, 10, 2), tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
NodeResources large = new NodeResources(2, 4, 10, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, large);
SystemState state = prepare(application, 2, 2, 3, 3, large, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void staging_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
assertEquals(9, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Activating hosts whose reservation was deactivated underneath fails with a clear message. */
@Test
public void activate_after_reservation_timeout() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// Deactivate the application out-of-band, invalidating the reservation held by 'state'
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
/** A request needing 10 nodes against 9 ready nodes fails with OutOfCapacityException. */
@Test
public void out_of_capacity() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
// One node short of the 2+2+3+3 = 10 requested below
tester.makeReadyNodes(9, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 3, 3, defaultResources, tester);
fail("Expected exception");
}
catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
/** In the public system, exceeding the tenant quota fails prepare with an explanatory message. */
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
// NOTE(review): the expected-message literal below is truncated in this copy
// (unterminated string after "https:") — restore the full URL from version control
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
/** The same over-quota request as out_of_quota() succeeds in the main (non-public) system. */
@Test
public void no_out_of_quota_outside_public() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.main,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
// Must not throw: quota is only enforced in SystemName.Public
prepare(application, 2, 2, 6, 3, defaultResources, tester);
}
/** With canFail=false, prepare succeeds even when fewer nodes than requested are available. */
@Test
public void out_of_capacity_but_cannot_fail() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
// Only 4 nodes ready for a 5-node request; the final 'false' (canFail) permits this
tester.makeReadyNodes(4, defaultResources);
ApplicationId application = tester.makeApplicationId();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
/** Nodes flagged wantToRetire are not allocatable, so prepare runs out of capacity. */
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
// Mark every ready node as wanting to retire before preparing
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
/**
 * Nodes wanting to retire stay in place while canFail=false (no spare capacity may exist),
 * and are actually retired (replacements added) on the next deployment where canFail=true.
 */
@Test
public void want_to_retire_but_cannot_fail() {
Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
Capacity capacityFORCED = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
// All active nodes now want to retire
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
// canFail=false: nothing may be retired, wantToRetire flags remain set
tester.activate(application, tester.prepare(application, cluster, capacityFORCED, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
// canFail=true: 5 replacements are allocated and the 5 flagged nodes become retired
tester.activate(application, tester.prepare(application, cluster, capacity, 1));
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
/** When shrinking clusters from 4 to 2 nodes, the nodes with the highest indexes are retired. */
@Test
public void highest_node_indexes_are_retired_first() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// Shrink every cluster; indexes 2 and 3 of each content cluster must be the retired ones
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
/** A redeploy replaces and retires active nodes that have been flagged wantToRetire. */
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{ // Deploy with 4 nodes
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{ // Flag 2 of the active nodes, redeploy, and verify they are retired by the system agent
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
/**
 * A partially satisfied prepare keeps its reservations; a later prepare reuses them,
 * refreshes their 'reserved' timestamp, and so protects them from the reservation expirer.
 */
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// Enough capacity now exists; the second prepare extends the existing reservations
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// Advance past the original reservation's age but not the refreshed one: nothing expires
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
/** Even with required=true, single-node clusters violate the prod redundancy requirement. */
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
fail("Expected exception");
} catch (IllegalArgumentException ignored) {}
}
/** In the dev system, applications are provisioned on devhost-type hosts. */
@Test
public void devsystem_application_deployment_on_devhost() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// dev shrinks each of the 4 clusters to a single node, matching the 4 available hosts
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** A second prepare with a new wanted Vespa version updates already-reserved nodes in place. */
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
// Prepare again at a newer version: the same reserved nodes must carry the new version
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
/** Switching a cluster between content and combined keeps the same nodes, only the type changes. */
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
// content -> combined
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
// combined -> content
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Convenience overload: prepares with the default wanted version "6.42". */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources flavor, ProvisioningTester tester) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42");
}
/** Convenience overload: prepares with required=false and no docker image repo. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources nodeResources, String wantedVersion) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, false, nodeResources,
wantedVersion, Optional.empty());
}
/**
 * Prepares two container and two content clusters with the given sizes, asserts the resulting
 * host sets are disjoint, correctly sized (after capacity-policy adjustment) and have the
 * expected memberships, and returns them bundled as a SystemState.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// Expected sizes are what the zone's capacity policies decide, not the raw requested counts
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares {@code nodeCount} nodes for the given cluster; a zero count allocates nothing. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
boolean required, NodeResources nodeResources) {
if (nodeCount == 0) return Collections.emptySet(); // no cluster of this kind requested
var preparedHosts = tester.prepare(application, cluster, nodeCount, groups, required, nodeResources);
return new HashSet<>(preparedHosts);
}
/** Snapshot of the host sets returned by a multi-cluster prepare, with assertion helpers. */
private static class SystemState {

private final Set<HostSpec> allHosts;
private final Set<HostSpec> container0;
private final Set<HostSpec> container1;
private final Set<HostSpec> content0;
private final Set<HostSpec> content1;

// Fix: parameters were named 'container1, container2' while being assigned to the
// container0/container1 fields — renamed to match the fields they initialize.
public SystemState(Set<HostSpec> allHosts,
                   Set<HostSpec> container0,
                   Set<HostSpec> container1,
                   Set<HostSpec> content0,
                   Set<HostSpec> content1) {
this.allHosts = allHosts;
this.container0 = container0;
this.container1 = container1;
this.content0 = content0;
this.content1 = content1;
}

/** Returns a host by cluster name and index, or null if there is no host with the given values in this */
public HostSpec hostByMembership(String clusterId, int group, int index) {
for (HostSpec host : allHosts) {
if ( ! host.membership().isPresent()) continue;
ClusterMembership membership = host.membership().get();
if (membership.cluster().id().value().equals(clusterId) &&
    groupMatches(membership.cluster().group(), group) &&
    membership.index() == index)
return host;
}
return null;
}

/** An absent group matches group 0 (single-group clusters carry no explicit group). */
private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
if ( ! clusterGroup.isPresent()) return group == 0;
return clusterGroup.get().index() == group;
}

/** Returns the hostnames of all hosts in this state. */
public Set<String> hostNames() {
return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
}

/** Removes and returns the host with the given hostname, or null if not present. */
public HostSpec removeHost(String hostname) {
for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
HostSpec host = i.next();
if (host.hostname().equals(hostname)) {
i.remove();
return host;
}
}
return null;
}

/** Asserts that this state contains every host of 'other' with unchanged membership. */
public void assertExtends(SystemState other) {
assertTrue(this.allHosts.containsAll(other.allHosts));
assertExtends(this.container0, other.container0);
assertExtends(this.container1, other.container1);
assertExtends(this.content0, other.content0);
assertExtends(this.content1, other.content1);
}

private void assertExtends(Set<HostSpec> extension,
                           Set<HostSpec> original) {
for (HostSpec originalHost : original) {
HostSpec newHost = findHost(originalHost.hostname(), extension);
// Fix: fail with a clear message instead of an NPE when a host is missing from the extension
org.junit.Assert.assertNotNull("Host " + originalHost.hostname() + " is present in the extension", newHost);
org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
}
}

/** Linear scan for a host by name; returns null when absent. */
private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
for (HostSpec host : hosts)
if (host.hostname().equals(hostName))
return host;
return null;
}

/** Asserts that all five host sets equal those of 'other'. */
public void assertEquals(SystemState other) {
org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
org.junit.Assert.assertEquals(this.container0, other.container0);
org.junit.Assert.assertEquals(this.container1, other.container1);
org.junit.Assert.assertEquals(this.content0, other.content0);
org.junit.Assert.assertEquals(this.content1, other.content1);
}
}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
// history().event(type) already selects by event type, so the former
// .filter(e -> e.type() == History.Event.Type.retired) was redundant and is removed
return node -> node.history().event(History.Event.Type.retired)
                             .filter(e -> e.agent() == agent)
                             .isPresent();
}
} |
Yeah, fixed | private void toSlime(Allocation allocation, Cursor object) {
toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
object.setString(wantedDockerImageRepoKey, allocation.membership().cluster().dockerImageRepo().orElse(""));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
} | object.setString(wantedDockerImageRepoKey, allocation.membership().cluster().dockerImageRepo().orElse("")); | private void toSlime(Allocation allocation, Cursor object) {
toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedDockerImageRepoKey, repo));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;

// ---- Top-level node fields ----
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String idKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentDockerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";

// ---- Flavor / resources fields ----
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";

// ---- Allocation fields ----
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedDockerImageRepoKey = "wantedDockerImageRepo";

// ---- History event fields ----
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";

// ---- Network port fields ----
private static final String networkPortsKey = "networkPorts";

public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/** Serializes the given node to JSON bytes; wraps any IOException as unchecked. */
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/** Writes all fields of a node into the given object; optional values are omitted when absent. */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), IP.Config::require);
toSlime(node.ipConfig().pool().asSet(), object.setArray(ipAddressPoolKey), UnaryOperator.identity() /* Pool already holds a validated address list */);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().dockerImage().ifPresent(image -> object.setString(currentDockerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
}
/**
 * Writes the flavor: configured flavors serialize by name (plus a disk override if present),
 * while ad-hoc flavors serialize their full node resources.
 */
private void toSlime(Flavor flavor, Cursor object) {
if ( ! flavor.isConfigured()) {
toSlime(flavor.resources(), object.setObject(resourcesKey));
return;
}
object.setString(flavorKey, flavor.name());
// A resources object is written whenever overrides exist, even if no disk value is set
flavor.flavorOverrides().ifPresent(overrides -> {
Cursor resourcesObject = object.setObject(resourcesKey);
overrides.diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
});
}
/** Writes all node resource dimensions; field order is part of the serialized form. */
private void toSlime(NodeResources resources, Cursor resourcesObject) {
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
}
/** Appends one array entry per history event, in the order history.events() yields them. */
private void toSlime(History history, Cursor array) {
history.events().forEach(event -> toSlime(event, array.addObject()));
}
/** Writes a single history event: type, epoch-milli timestamp and acting agent. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Validates the addresses via the supplied operator, then writes them in IP natural order. */
private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) {
validator.apply(ipAddresses).stream().sorted(IP.NATURAL_ORDER).forEach(array::addString);
}
/** Deserializes a node from JSON bytes; the state is supplied externally, not stored in the data. */
public Node fromJson(Node.State state, byte[] data) {
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
}
/** Reconstructs a Node from its serialized form; the flavor is read first because the
 * allocation falls back to the flavor's resources when no requested resources were stored. */
private Node nodeFromSlime(Node.State state, Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey)),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavor,
statusFromSlime(object),
state,
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
modelNameFromSlime(object),
reservedToFromSlime(object.field(reservedToKey)));
}
/** Reads the node status; absent optional fields deserialize as Optional.empty(). */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
dockerImageFromSlime(object.field(currentDockerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
instantFromSlime(object.field(firmwareCheckKey)));
}
/**
 * Reads the flavor: a named (configured) flavor, possibly carrying a disk override,
 * or an ad-hoc flavor built from serialized resources.
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
Inspector flavorName = object.field(flavorKey);
if ( ! flavorName.valid())
return new Flavor(resourcesFromSlime(resources).get());
Flavor flavor = flavors.getFlavorOrThrow(flavorName.asString());
return resources.valid() ? flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()))
                         : flavor;
}
/** Reads node resources, or empty if the resources object is absent. */
private Optional<NodeResources> resourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return Optional.empty();
return Optional.of(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
resources.field(bandwidthKey).asDouble(),
diskSpeedFromSlime(resources.field(diskSpeedKey)),
storageTypeFromSlime(resources.field(storageTypeKey))));
}
/** Reads the allocation, or empty if the node is unallocated. Older data without stored
 * requested resources falls back to the node's assigned (flavor) resources. */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
resourcesFromSlime(object.field(requestedResourcesKey)).orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Assembles the owning application id from its three stored components. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Reads the history array; events with unknown types deserialize to null and are dropped. */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
/** Reads a single history event, or null if its type string is unrecognized (to be skipped). */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two named fields. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
return new Generation(object.field(wantedField).asLong(),
                      object.field(currentField).asLong());
}
/** Reads the cluster membership; the wanted Vespa version is mandatory, the repo optional. */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
dockerImageRepoFromSlime(object.field(wantedDockerImageRepoKey)));
}
/** Parses a version string field, or empty if the field is absent. */
private Optional<Version> versionFromSlime(Inspector object) {
return object.valid() ? Optional.of(Version.fromString(object.asString()))
                      : Optional.empty();
}
/** Reads a docker image repo field; both a missing field and an empty string deserialize to empty. */
private Optional<String> dockerImageRepoFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
String repo = object.asString();
return repo.isEmpty() ? Optional.empty() : Optional.of(repo);
}
/** Reads a docker image field, or empty if not present. Unlike dockerImageRepoFromSlime, an empty string is not special-cased here. */
private Optional<DockerImage> dockerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an epoch-millis long field as an Instant, or empty if the field is not present. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
    return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Reads the parent hostname of this node, or empty if it has none recorded. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
Inspector parent = object.field(parentHostnameKey);
return parent.valid() ? Optional.of(parent.asString()) : Optional.empty();
}
/** Reads the array of IP address strings under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
/** Reads the hardware model name, or empty if none was recorded. */
private Optional<String> modelNameFromSlime(Inspector object) {
Inspector modelName = object.field(modelNameKey);
if ( ! modelName.valid()) return Optional.empty();
return Optional.of(modelName.asString());
}
/** Reads the tenant this host is reserved to, if any; non-string values are rejected with an explicit error. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
    throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/**
 * Returns the event type for the given serialized name.
 * NOTE(review): the historical contract (and callers' null checks) says unknown-but-ignorable
 * types return null, but this implementation throws for every unrecognized name — confirm intent.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
    case "provisioned" : return History.Event.Type.provisioned;
    case "deprovisioned" : return History.Event.Type.deprovisioned;
    case "readied" : return History.Event.Type.readied;
    case "reserved" : return History.Event.Type.reserved;
    case "activated" : return History.Event.Type.activated;
    case "wantToRetire": return History.Event.Type.wantToRetire;
    case "retired" : return History.Event.Type.retired;
    case "deactivated" : return History.Event.Type.deactivated;
    case "parked" : return History.Event.Type.parked;
    case "failed" : return History.Event.Type.failed;
    case "deallocated" : return History.Event.Type.deallocated;
    case "down" : return History.Event.Type.down;
    case "requested" : return History.Event.Type.requested;
    case "rebooted" : return History.Event.Type.rebooted;
    case "osUpgraded" : return History.Event.Type.osUpgraded;
    case "firmwareVerified" : return History.Event.Type.firmwareVerified;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serialized name of a history event type; must round-trip through eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
    case provisioned : return "provisioned";
    case deprovisioned : return "deprovisioned";
    case readied : return "readied";
    case reserved : return "reserved";
    case activated : return "activated";
    case wantToRetire: return "wantToRetire";
    case retired : return "retired";
    case deactivated : return "deactivated";
    case parked : return "parked";
    case failed : return "failed";
    case deallocated : return "deallocated";
    case down : return "down";
    case requested: return "requested";
    case rebooted: return "rebooted";
    case osUpgraded: return "osUpgraded";
    case firmwareVerified: return "firmwareVerified";
}
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Returns the agent for the given serialized name, throwing for unrecognized names. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
    case "operator" : return Agent.operator;
    case "application" : return Agent.application;
    case "system" : return Agent.system;
    case "NodeFailer" : return Agent.NodeFailer;
    case "Rebalancer" : return Agent.Rebalancer;
    case "DirtyExpirer" : return Agent.DirtyExpirer;
    case "FailedExpirer" : return Agent.FailedExpirer;
    case "InactiveExpirer" : return Agent.InactiveExpirer;
    case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
    case "ReservationExpirer" : return Agent.ReservationExpirer;
    case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serialized name of an agent; must round-trip through eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
    case operator : return "operator";
    case application : return "application";
    case system : return "system";
    case NodeFailer : return "NodeFailer";
    case Rebalancer : return "Rebalancer";
    case DirtyExpirer : return "DirtyExpirer";
    case FailedExpirer : return "FailedExpirer";
    case InactiveExpirer : return "InactiveExpirer";
    case ProvisionedExpirer : return "ProvisionedExpirer";
    case ReservationExpirer : return "ReservationExpirer";
    case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Returns the node type for the given serialized name, throwing for unrecognized names. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
    case "tenant": return NodeType.tenant;
    case "host": return NodeType.host;
    case "proxy": return NodeType.proxy;
    case "proxyhost": return NodeType.proxyhost;
    case "config": return NodeType.config;
    case "confighost": return NodeType.confighost;
    case "controller": return NodeType.controller;
    case "controllerhost": return NodeType.controllerhost;
    case "devhost": return NodeType.devhost;
    default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serialized name of a node type; must round-trip through nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
    case tenant: return "tenant";
    case host: return "host";
    case proxy: return "proxy";
    case proxyhost: return "proxyhost";
    case config: return "config";
    case confighost: return "confighost";
    case controller: return "controller";
    case controllerhost: return "controllerhost";
    case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
/** Returns the disk speed for the given serialized value, throwing for unrecognized values. */
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
switch (diskSpeed.asString()) {
    case "fast" : return NodeResources.DiskSpeed.fast;
    case "slow" : return NodeResources.DiskSpeed.slow;
    case "any" : return NodeResources.DiskSpeed.any;
    default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
}
}
/** Serialized form of a disk speed; must round-trip through diskSpeedFromSlime. */
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
    case fast : return "fast";
    case slow : return "slow";
    case any : return "any";
    default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed + "'");
}
}
/** Returns the storage type for the given field; a missing field maps to the default storage type. */
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
if ( ! storageType.valid()) return NodeResources.StorageType.getDefault();
switch (storageType.asString()) {
    case "remote" : return NodeResources.StorageType.remote;
    case "local" : return NodeResources.StorageType.local;
    case "any" : return NodeResources.StorageType.any;
    default: throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
}
}
/** Serialized form of a storage type; must round-trip through storageTypeFromSlime. */
private static String storageTypeToString(NodeResources.StorageType storageType) {
switch (storageType) {
    case remote : return "remote";
    case local : return "local";
    case any : return "any";
    default: throw new IllegalStateException("Illegal storage-type value '" + storageType + "'");
}
}
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
// NOTE: the string values below are the persisted wire format; never change an existing value.
// --- Top-level node fields ---
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String idKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentDockerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
// --- Flavor/resources fields ---
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";
// --- Allocation ("instance" object) fields ---
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedDockerImageRepoKey = "wantedDockerImageRepo";
// --- History event fields ---
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
// --- Network ports ---
private static final String networkPortsKey = "networkPorts";
/** Creates a serializer which resolves configured flavor names through the given flavors. */
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/** Serializes the given node to its JSON wire format. */
public byte[] toJson(Node node) {
try {
    Slime slime = new Slime();
    toSlime(node, slime.setObject());
    return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
    // in-memory serialization should never fail; surface as unchecked if it does
    throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/** Serializes all persisted fields of a node into the given object cursor. Optional fields are omitted when empty. */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), IP.Config::require);
toSlime(node.ipConfig().pool().asSet(), object.setArray(ipAddressPoolKey), UnaryOperator.identity() /* Pool already holds a validated address list */);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().dockerImage().ifPresent(image -> object.setString(currentDockerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
// NOTE(review): current os version uses toString() while wanted uses toFullString() — confirm this asymmetry is intended
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
}
/** Serializes a flavor either as a configured flavor name (plus any disk override), or as explicit resources. */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
    object.setString(flavorKey, flavor.name());
    if (flavor.flavorOverrides().isPresent()) {
        // only the disk size can currently be overridden on a configured flavor
        Cursor resourcesObject = object.setObject(resourcesKey);
        flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
    }
}
else {
    toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Serializes a full node resources specification into the given object cursor. */
private void toSlime(NodeResources resources, Cursor resourcesObject) {
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
}
/** Serializes each history event as an element of the given array cursor. */
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
    toSlime(event, array.addObject());
}
/** Serializes a single history event (type, timestamp in epoch millis, agent). */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Serializes the given addresses in natural IP order, after passing them through the given validator. */
private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) {
validator.apply(ipAddresses).stream().sorted(IP.NATURAL_ORDER).forEach(array::addString);
}
/** Deserializes a node from JSON. The node state is not part of the serialized form and is supplied by the caller. */
public Node fromJson(Node.State state, byte[] data) {
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
}
/** Deserializes a complete node from the given root object. */
private Node nodeFromSlime(Node.State state, Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
                new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
                              ipAddressesFromSlime(object, ipAddressPoolKey)),
                object.field(hostnameKey).asString(),
                parentHostnameFromSlime(object),
                flavor,
                statusFromSlime(object),
                state,
                // the flavor's resources are the fallback when no requestedResources field exists
                allocationFromSlime(flavor.resources(), object.field(instanceKey)),
                historyFromSlime(object.field(historyKey)),
                nodeTypeFromString(object.field(nodeTypeKey).asString()),
                Reports.fromSlime(object.field(reportsKey)),
                modelNameFromSlime(object),
                reservedToFromSlime(object.field(reservedToKey)));
}
/** Deserializes the node status (generations, versions, failure counters, retire/deprovision wishes). */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
                  versionFromSlime(object.field(vespaVersionKey)),
                  dockerImageFromSlime(object.field(currentDockerImageKey)),
                  (int) object.field(failCountKey).asLong(),
                  object.field(wantToRetireKey).asBool(),
                  object.field(wantToDeprovisionKey).asBool(),
                  new OsVersion(versionFromSlime(object.field(osVersionKey)),
                                versionFromSlime(object.field(wantedOsVersionKey))),
                  instantFromSlime(object.field(firmwareCheckKey)));
}
/** Reads the flavor: either a configured flavor name (with an optional disk override), or explicit resources. */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
Inspector flavorName = object.field(flavorKey);
if ( ! flavorName.valid()) // no configured flavor: resources must be serialized explicitly
    return new Flavor(resourcesFromSlime(resources).get());
Flavor flavor = flavors.getFlavorOrThrow(flavorName.asString());
if ( ! resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
/** Deserializes explicit node resources, or empty if the given object is not present. */
private Optional<NodeResources> resourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return Optional.empty();
return Optional.of(new NodeResources(resources.field(vcpuKey).asDouble(),
                                     resources.field(memoryKey).asDouble(),
                                     resources.field(diskKey).asDouble(),
                                     resources.field(bandwidthKey).asDouble(),
                                     diskSpeedFromSlime(resources.field(diskSpeedKey)),
                                     storageTypeFromSlime(resources.field(storageTypeKey))));
}
/** Deserializes a node's allocation, or empty if the given object is not present (node is unallocated). */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
                                  clusterMembershipFromSlime(object),
                                  // a missing requestedResources field falls back to the resources actually assigned
                                  resourcesFromSlime(object.field(requestedResourcesKey)).orElse(assignedResources),
                                  generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
                                  object.field(removableKey).asBool(),
                                  NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application's id from its tenant/application/instance fields. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
                          ApplicationName.from(object.field(applicationIdKey).asString()),
                          InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Deserializes the node's event history; events deserializing to null (ignorable types) are skipped. */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
    History.Event event = eventFromSlime(item);
    if (event != null)
        events.add(event);
});
return new History(events);
}
/** Deserializes a single history event, or null when its type maps to null (event should be ignored). */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given field names. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
long wanted = object.field(wantedField).asLong();
long current = object.field(currentField).asLong();
return new Generation(wanted, current);
}
/** Deserializes the cluster membership of an allocated node. */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
                              // wantedVespaVersion is treated as mandatory: .get() throws if the field is missing
                              versionFromSlime(object.field(wantedVespaVersionKey)).get(),
                              dockerImageRepoFromSlime(object.field(wantedDockerImageRepoKey)));
}
/** Reads a version string field, or empty if the field is not present. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Reads a docker image repo field; both a missing field and an empty string deserialize to empty. */
private Optional<String> dockerImageRepoFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
String repo = object.asString();
return repo.isEmpty() ? Optional.empty() : Optional.of(repo);
}
/** Reads a docker image field, or empty if not present. Unlike dockerImageRepoFromSlime, an empty string is not special-cased here. */
private Optional<DockerImage> dockerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an epoch-millis long field as an Instant, or empty if the field is not present. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
    return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Reads the parent hostname of this node, or empty if it has none recorded. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
Inspector parent = object.field(parentHostnameKey);
return parent.valid() ? Optional.of(parent.asString()) : Optional.empty();
}
/** Reads the array of IP address strings under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
/** Reads the hardware model name, or empty if none was recorded. */
private Optional<String> modelNameFromSlime(Inspector object) {
Inspector modelName = object.field(modelNameKey);
if ( ! modelName.valid()) return Optional.empty();
return Optional.of(modelName.asString());
}
/** Reads the tenant this host is reserved to, if any; non-string values are rejected with an explicit error. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
    throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/**
 * Returns the event type for the given serialized name.
 * NOTE(review): the historical contract (and callers' null checks) says unknown-but-ignorable
 * types return null, but this implementation throws for every unrecognized name — confirm intent.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
    case "provisioned" : return History.Event.Type.provisioned;
    case "deprovisioned" : return History.Event.Type.deprovisioned;
    case "readied" : return History.Event.Type.readied;
    case "reserved" : return History.Event.Type.reserved;
    case "activated" : return History.Event.Type.activated;
    case "wantToRetire": return History.Event.Type.wantToRetire;
    case "retired" : return History.Event.Type.retired;
    case "deactivated" : return History.Event.Type.deactivated;
    case "parked" : return History.Event.Type.parked;
    case "failed" : return History.Event.Type.failed;
    case "deallocated" : return History.Event.Type.deallocated;
    case "down" : return History.Event.Type.down;
    case "requested" : return History.Event.Type.requested;
    case "rebooted" : return History.Event.Type.rebooted;
    case "osUpgraded" : return History.Event.Type.osUpgraded;
    case "firmwareVerified" : return History.Event.Type.firmwareVerified;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serialized name of a history event type; must round-trip through eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
    case provisioned : return "provisioned";
    case deprovisioned : return "deprovisioned";
    case readied : return "readied";
    case reserved : return "reserved";
    case activated : return "activated";
    case wantToRetire: return "wantToRetire";
    case retired : return "retired";
    case deactivated : return "deactivated";
    case parked : return "parked";
    case failed : return "failed";
    case deallocated : return "deallocated";
    case down : return "down";
    case requested: return "requested";
    case rebooted: return "rebooted";
    case osUpgraded: return "osUpgraded";
    case firmwareVerified: return "firmwareVerified";
}
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Returns the agent for the given serialized name, throwing for unrecognized names. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
    case "operator" : return Agent.operator;
    case "application" : return Agent.application;
    case "system" : return Agent.system;
    case "NodeFailer" : return Agent.NodeFailer;
    case "Rebalancer" : return Agent.Rebalancer;
    case "DirtyExpirer" : return Agent.DirtyExpirer;
    case "FailedExpirer" : return Agent.FailedExpirer;
    case "InactiveExpirer" : return Agent.InactiveExpirer;
    case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
    case "ReservationExpirer" : return Agent.ReservationExpirer;
    case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serialized name of an agent; must round-trip through eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
    case operator : return "operator";
    case application : return "application";
    case system : return "system";
    case NodeFailer : return "NodeFailer";
    case Rebalancer : return "Rebalancer";
    case DirtyExpirer : return "DirtyExpirer";
    case FailedExpirer : return "FailedExpirer";
    case InactiveExpirer : return "InactiveExpirer";
    case ProvisionedExpirer : return "ProvisionedExpirer";
    case ReservationExpirer : return "ReservationExpirer";
    case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Returns the node type for the given serialized name, throwing for unrecognized names. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
    case "tenant": return NodeType.tenant;
    case "host": return NodeType.host;
    case "proxy": return NodeType.proxy;
    case "proxyhost": return NodeType.proxyhost;
    case "config": return NodeType.config;
    case "confighost": return NodeType.confighost;
    case "controller": return NodeType.controller;
    case "controllerhost": return NodeType.controllerhost;
    case "devhost": return NodeType.devhost;
    default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serialized name of a node type; must round-trip through nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
    case tenant: return "tenant";
    case host: return "host";
    case proxy: return "proxy";
    case proxyhost: return "proxyhost";
    case config: return "config";
    case confighost: return "confighost";
    case controller: return "controller";
    case controllerhost: return "controllerhost";
    case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
/** Returns the disk speed for the given serialized value, throwing for unrecognized values. */
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
switch (diskSpeed.asString()) {
    case "fast" : return NodeResources.DiskSpeed.fast;
    case "slow" : return NodeResources.DiskSpeed.slow;
    case "any" : return NodeResources.DiskSpeed.any;
    default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
}
}
/** Serialized form of a disk speed; must round-trip through diskSpeedFromSlime. */
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
    case fast : return "fast";
    case slow : return "slow";
    case any : return "any";
    default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed + "'");
}
}
/** Returns the storage type for the given field; a missing field maps to the default storage type. */
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
if ( ! storageType.valid()) return NodeResources.StorageType.getDefault();
switch (storageType.asString()) {
    case "remote" : return NodeResources.StorageType.remote;
    case "local" : return NodeResources.StorageType.local;
    case "any" : return NodeResources.StorageType.any;
    default: throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
}
}
/** Serialized form of a storage type; must round-trip through storageTypeFromSlime. */
private static String storageTypeToString(NodeResources.StorageType storageType) {
switch (storageType) {
    case remote : return "remote";
    case local : return "local";
    case any : return "any";
    default: throw new IllegalStateException("Illegal storage-type value '" + storageType + "'");
}
}
} |
// NOTE(review): this region held two garbled variants (before/after dataset rows, separated by
// stray '|' markers) of the same test. Consolidated to the corrected version: the unrelated
// assertFalse(host1.version().isPresent()) is removed, and the test verifies the docker image
// (repo + version tag) actually stored on the node.
public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1, 1, false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
// Record a docker image (repo + wanted version as tag) on the node, as node-admin would
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3"));
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(dockerImage)), () -> {});
// Redeploy and verify the stored image is returned
SystemState state2 = prepare(application1, tester, 1, 1, 1, 1, false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
node1 = tester.nodeRepository().getNode(host1.hostname()).get();
assertEquals(dockerImage, node1.status().dockerImage().get());
// closing brace retained on the following original line
} | class ProvisioningTest {
// Resources used for every test node: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
/** End-to-end scenario: repeated deployments of a fixed-size application, interleaved with a second application, node removal, failure, and restart filtering. */
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
// deploy
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// redeploy: same hosts are returned
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
// deploy a second application: must get disjunct hosts
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
// several prepares before activate are all stable
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
// remove a host from the prepared set: it becomes inactive on activation
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
// shrink the application
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
// remove the application entirely: all its nodes become inactive
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
// fail a node of application2: a replacement is reserved on redeploy
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
// redeploy application1: gets its original (inactive) hosts back
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
// restart with various filters and verify restart generations were bumped accordingly
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
/** A node's current Vespa version, once written to the node repository, is returned on subsequent prepares. */
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// no version recorded yet
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
// record a version directly on the node, as node-admin would after upgrading it
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
// redeploy: the recorded version is now returned on the host spec
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
@Test  // was duplicated; @Test is not a repeatable annotation, so two copies do not compile
public void application_deployment_variable_application_size() {
    // Exercises grow/shrink cycles of one application: new nodes are reserved on growth,
    // surplus containers are deactivated and surplus content nodes retired on shrink.
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(24, defaultResources);
    // deploy
    SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // redeploy with increased sizes
    SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
    state2.assertExtends(state1);
    assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
    tester.activate(application1, state2.allHosts);
    // decrease again
    SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
    assertEquals("Superfluous container nodes are deactivated",
                 3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    // increase again
    SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
    assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Earlier retired nodes are not unretired before activate",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    state4.assertExtends(state2);
    assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
    // remove one retired host from the allocation before activating
    HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
    tester.activate(application1, state4.allHosts);
    assertEquals("Retired active removed when activating became inactive",
                 1, tester.getNodes(application1, Node.State.inactive).asList().size());
    assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
    assertEquals("Earlier retired nodes are unretired on activate",
                 0, tester.getNodes(application1, Node.State.active).retired().size());
    // decrease again
    SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state5.allHosts);
    assertEquals("Superfluous container nodes are also deactivated",
                 4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // increase content slightly
    SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
    tester.activate(application1, state6.allHosts);
    assertEquals("One content node is unretired",
                 5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Then reserve more, but remove the application before activating
    SystemState state7 = prepare(application1, 8, 2, 2, 2, defaultResources, tester);
    NestedTransaction removeTransaction = new NestedTransaction();
    tester.provisioner().remove(removeTransaction, application1);
    removeTransaction.commit();
    assertEquals(0, tester.getNodes(application1, Node.State.active).size());
    assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
// Verifies that redeploying with a different node flavor swaps the entire cluster:
// all old (small) nodes are retired and new (large) nodes take over.
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
// deploy on the small flavor
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
// redeploy on the large flavor; larger counts require new (large) nodes
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
    // Deploying exactly at capacity succeeds, one node above fails, and retrying
    // at the original size still succeeds afterwards.
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(5, defaultResources);
    // all 5 ready nodes are consumed: 2 container + 3 content
    SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    try {
        // asking for one node more than exists must fail
        // (the unused 'state2' assignment was removed; the result is never reached)
        prepare(application1, 3, 0, 3, 0, defaultResources, tester);
        fail("Expected out of capacity exception");
    }
    catch (OutOfCapacityException expected) {
    }
    // the original layout can still be re-prepared and activated
    SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
    // In dev, a 2+2+3+3 request is satisfied by the 4 available hosts in total.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    var app = tester.makeApplicationId();
    var prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
// Verifies that the resources originally requested (including an unspecified
// disk speed) are retained both on the returned HostSpecs and on the nodes'
// allocations in the node repository, across redeployments.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
// Deploy with resources that leave disk speed unspecified ('any')
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
// Redeploy with fully specified resources; the recorded request follows suit
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
@Test
public void deploy_specific_vespa_version() {
    // Preparing with an explicitly wanted Vespa version ("6.91") succeeds in dev.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    var app = tester.makeApplicationId();
    var prepared = prepare(app, tester, 2, 2, 3, 3, defaultResources, "6.91");
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
    // Preparing with both a wanted Vespa version and a docker image repo succeeds in dev.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    var app = tester.makeApplicationId();
    var imageRepo = "docker.domain.tld/my/image";
    var prepared = prepare(app, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(imageRepo));
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void test_deployment_size() {
    // In the test environment the 2+2+3+3 request ends up on 4 hosts in total.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);
    var prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
    // A single-node cluster in prod is rejected even though capacity is plentiful.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    prepare(app, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    var resources = new NodeResources(2, 4, 10, 2);
    tester.makeReadyNodes(4, resources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    var app = tester.makeApplicationId();
    // Request the same resources as the ready hosts; all 4 hosts are used
    var prepared = prepare(app, 2, 2, 3, 3, resources, tester);
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
    var large = new NodeResources(2, 4, 10, 0.3);
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    tester.makeReadyNodes(4, large);
    var prepared = prepare(app, 2, 2, 3, 3, large, tester);
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void staging_deployment_size() {
    // A 1+1+1+64 request in staging is reduced; 9 hosts are allocated in total.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    tester.makeReadyNodes(14, defaultResources);
    var prepared = prepare(app, 1, 1, 1, 64, defaultResources, tester);
    assertEquals(9, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void activate_after_reservation_timeout() {
// Verifies that activating hosts whose reservation has been deactivated fails
// with a descriptive IllegalArgumentException.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// Simulate expiry of the reservation by deactivating the application
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
@Test
public void out_of_capacity() {
    // 10 nodes are required by the request but only 9 are ready, so prepare fails.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(9, defaultResources);
    var app = tester.makeApplicationId();
    try {
        prepare(app, 2, 2, 3, 3, defaultResources, tester);
        fail("Expected exception");
    }
    catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
@Test
public void no_out_of_quota_outside_public() {
    // The same deployment that exceeds the quota in the public system is accepted in SystemName.main.
    var tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.main,
                                                                Environment.prod,
                                                                RegionName.from("us-east"))).build();
    tester.makeReadyNodes(13, defaultResources);
    var app = tester.makeApplicationId();
    prepare(app, 2, 2, 6, 3, defaultResources, tester);
}
@Test
public void out_of_capacity_but_cannot_fail() {
    // 5 nodes are requested with only 4 ready; the capacity is created with its last
    // flag false (presumably canFail — confirm), so prepare must not throw.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources);
    var app = tester.makeApplicationId();
    var cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    tester.prepare(app, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
    // Every ready node is flagged wantToRetire, so none can satisfy the request.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    for (Node node : tester.makeReadyNodes(5, defaultResources))
        tester.patchNode(node.with(node.status().withWantToRetire(true)));
    try {
        prepare(app, 2, 0, 2, 0, defaultResources, tester);
        fail("Expected exception");
    } catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void want_to_retire_but_cannot_fail() {
    // Nodes flagged wantToRetire are only actually retired when the deployment is
    // allowed to fail; with the non-failable capacity the allocation stays unchanged.
    Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
    // renamed from 'capacityFORCED' to follow lowerCamelCase local-variable naming
    Capacity capacityForced = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    // initial deployment: 5 active, none retired
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    // flag all active nodes as wanting to retire
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    // redeploy with the forced (cannot-fail) capacity: nothing is retired yet
    tester.activate(application, tester.prepare(application, cluster, capacityForced, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
    // redeploy with the normal capacity: the flagged nodes are retired and replaced
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
public void highest_node_indexes_are_retired_first() {
// When shrinking from 4 to 2 nodes per content cluster, the nodes with the
// highest membership indexes (2 and 3) are the ones chosen for retirement.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
// deploy
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
// decrease cluster sizes
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
// content0: indexes 0 and 1 survive, 2 and 3 are retired
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
// content1: same pattern
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
// Verifies that nodes flagged wantToRetire are retired (by the system agent)
// on the next redeployment even when the cluster sizes are unchanged.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{
// initial deployment: 4 active nodes, none retired
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{
// flag two of the active nodes and redeploy with the same layout
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
// Verifies that a new prepare refreshes the 'reserved' timestamp of nodes
// already reserved in an earlier (partially failed) prepare, so the
// ReservationExpirer does not release them.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
// first prepare fails, but the 2 available nodes remain reserved
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// make the missing capacity available and prepare again after some time
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// advance past the original reservation's expiry; the refreshed timestamps keep the nodes reserved
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
    // Even with required=true, single-node clusters are rejected in prod.
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var app = tester.makeApplicationId();
    try {
        prepare(app, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
        fail("Expected exception");
    } catch (IllegalArgumentException ignored) {}
}
@Test
public void devsystem_application_deployment_on_devhost() {
    // In the dev system, tenant nodes are provisioned on devhost-type hosts.
    var tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
    var app = tester.makeApplicationId();
    var prepared = prepare(app, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, prepared.allHosts.size());
    tester.activate(app, prepared.allHosts);
}
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
// Verifies that preparing again with a new wanted Vespa version updates the
// cluster spec on nodes that are still in the reserved state.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
// Preparing again (without activating) moves the reserved nodes to version2
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
// Verifies that switching a cluster's type content -> combined -> content keeps
// the same node allocation while updating the cluster type on the memberships.
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
// deploy as a content cluster
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
// redeploy as combined
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
// and back to content
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Prepares all four clusters with the default wanted Vespa version, 6.42. */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources resources, ProvisioningTester tester) {
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, resources, "6.42");
}
/** Prepares all four clusters as non-required and without a docker image override. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources resources, String version) {
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, false,
                   resources, version, Optional.empty());
}
/**
 * Prepares two container and two content clusters of the given sizes for the application
 * and sanity-checks the result: unretired node counts per cluster must match what the
 * capacity policies decide for this environment, and cluster memberships must be consistent.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
// cluster specification for each of the 4 clusters
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
// prepare each cluster individually (empty set for size 0)
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// The environment's capacity policies may change the requested sizes; compute the expectation
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
// Check cluster/group sizes
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
// Check that all hosts are allocated to the right cluster
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/**
 * Prepares a single cluster and returns the allocated hosts as a set.
 * Returns an immutable empty set when nodeCount is 0 (no prepare call is made).
 */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    if (nodeCount == 0) return Set.of(); // Set.of() over Collections.emptySet(): consistent with the file's existing use of Set.of
    return new HashSet<>(tester.prepare(application, cluster, nodeCount, groups, required, nodeResources));
}
/** Holds the host sets produced by preparing all four clusters of a deployment. */
private static class SystemState {
    // Fields are final: they are assigned only in the constructor
    // (allHosts itself is still mutated by removeHost).
    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    // Parameter names fixed: they previously read container1/container2
    // while being assigned to the container0/container1 fields.
    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    // A host without an explicit group matches group 0
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group==0;
        return clusterGroup.get().index() == group;
    }

    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes the host with the given hostname from allHosts and returns it, or null if not present */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this allocation contains all of 'other', with unchanged cluster memberships */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension,
                               Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts that this and 'other' contain exactly the same host sets */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }
}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // event(Type.retired) already selects the retired event (cf. its other use with
    // Type.reserved in this file), so the former extra type filter was redundant.
    return (node) -> node.history().event(History.Event.Type.retired)
                                   .filter(e -> e.agent() == agent)
                                   .isPresent();
}
} | class ProvisioningTest {
// Default node resources used throughout these tests: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth.
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
/**
 * End-to-end provisioning lifecycle at constant application size: repeated prepares are
 * idempotent, two applications get disjoint hosts, removal deactivates, a failed host is
 * replaced, and restart filters are honored.
 */
@Test
public void application_deployment_constant_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
// Deploy app1, then verify that a second prepare with identical capacity yields the same hosts
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
// Activating with one host removed makes exactly that host inactive
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
// Shrinking to one container + one content cluster deactivates the surplus nodes
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
// Fail one of app2's hosts; the next prepare reserves a replacement
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
// Redeploying app1 at the original size reuses its original (now inactive) hosts
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
// Restart commands are counted per filter
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
/**
 * The host spec's version is absent until the node's status has a Vespa version recorded
 * in the node repository, and reflects that version on the next prepare.
 */
@Test
public void nodeVersionIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
// No Vespa version has been written to the node's status yet, so none is reported
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
// After writing the version it is surfaced through the host spec of the next prepare
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
/**
 * Growing and shrinking an application: new nodes are reserved on growth, surplus container
 * nodes are deactivated while surplus content nodes are retired, and retired nodes are
 * unretired when capacity grows again.
 * Fix: the @Test annotation was duplicated, which does not compile (annotations are not
 * repeatable); the unused 'state7' local is also dropped — the prepare call itself is kept
 * for its reservation side effects and internal assertions.
 */
@Test
public void application_deployment_variable_application_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(24, defaultResources);
    SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // Grow: all previously active hosts are kept, new ones are reserved
    SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
    state2.assertExtends(state1);
    assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
    tester.activate(application1, state2.allHosts);
    // Shrink back: containers deactivate, content retires (to allow data migration)
    SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
    assertEquals("Superfluous container nodes are deactivated",
                 3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    // Grow again: inactive nodes are reused before new ones
    SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
    assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Earlier retired nodes are not unretired before activate",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    state4.assertExtends(state2);
    assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
    // Remove a retired host from the prepared set before activating
    HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
    tester.activate(application1, state4.allHosts);
    assertEquals("Retired active removed when activating became inactive",
                 1, tester.getNodes(application1, Node.State.inactive).asList().size());
    assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
    assertEquals("Earlier retired nodes are unretired on activate",
                 0, tester.getNodes(application1, Node.State.active).retired().size());
    SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state5.allHosts);
    assertEquals("Superfluous container nodes are also deactivated",
                 4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
    tester.activate(application1, state6.allHosts);
    assertEquals("One content node is unretired",
                 5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Result unused on purpose: the prepare reserves nodes and runs its internal assertions
    prepare(application1, 8, 2, 2, 2, defaultResources, tester);
    // Removing the application releases all its nodes
    NestedTransaction removeTransaction = new NestedTransaction();
    tester.provisioner().remove(removeTransaction, application1);
    removeTransaction.commit();
    assertEquals(0, tester.getNodes(application1, Node.State.active).size());
    assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
/**
 * Changing an application's node resources swaps the whole cluster: all small nodes are
 * retired once large nodes are activated, while none of the new large nodes are retired.
 */
@Test
public void application_deployment_multiple_flavors() {
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
// Redeploy with the large flavor: 16 fresh nodes are reserved for the replacement clusters
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
/**
 * A prepare exceeding available capacity throws OutOfCapacityException, and a subsequent
 * prepare at the original (satisfiable) size still succeeds.
 */
@Test
public void application_deployment_above_then_at_capacity_limit() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(5, defaultResources);
SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state1.allHosts);
try {
// 3 + 3 nodes requested but only 5 exist — must fail
SystemState state2 = prepare(application1, 3, 0, 3, 0, defaultResources, tester);
fail("Expected out of capacity exception");
}
catch (OutOfCapacityException expected) {
}
// The failed prepare must not have corrupted anything: original size still deploys
SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
tester.activate(application1, state3.allHosts);
}
/** In dev, cluster sizes are reduced: a 2+2+3+3 request is provisioned on just 4 nodes. */
@Test
public void dev_deployment_node_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/**
 * The resources as originally requested (including an unspecified disk speed) are retained
 * on both the returned host specs and the stored node allocations.
 */
@Test
public void requested_resources_info_is_retained() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
// justNumbers() leaves disk speed unspecified ('any'); that must survive the round trip
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
// Requesting the full resources (fast disk) updates the recorded requested resources
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
/** Deploying with an explicit wanted Vespa version succeeds in dev. */
@Test
public void deploy_specific_vespa_version() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Deploying with both an explicit Vespa version and a custom docker image repo succeeds in dev. */
@Test
public void deploy_specific_vespa_version_and_docker_image() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
String dockerImageRepo = "docker.domain.tld/my/image";
SystemState state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** In the test environment, cluster sizes are reduced: a 2+2+3+3 request fits on 4 nodes. */
@Test
public void test_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Requesting a single-node (non-redundant) container cluster in prod is rejected. */
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
// Request matches the host resources; dev still collapses the clusters onto 4 nodes
SystemState state = prepare(application, 2, 2, 3, 3,
new NodeResources(2, 4, 10, 2), tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
NodeResources large = new NodeResources(2, 4, 10, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, large);
SystemState state = prepare(application, 2, 2, 3, 3, large, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/**
 * Staging caps cluster sizes: a request for up to 64 content nodes is reduced so that
 * the whole deployment fits on 9 nodes.
 */
@Test
public void staging_deployment_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
assertEquals(9, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/**
 * Activating after the application's reservation has been deactivated fails with an
 * IllegalArgumentException describing the failed activation.
 */
@Test
public void activate_after_reservation_timeout() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// Simulate the reservation being released (e.g. by timeout) before activation
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
/** Preparing when fewer nodes are available than requested throws OutOfCapacityException. */
@Test
public void out_of_capacity() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
// 9 ready nodes but 2+2+3+3 = 10 requested
tester.makeReadyNodes(9, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 3, 3, defaultResources, tester);
fail("Expected exception");
}
catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
/** Quota is only enforced in the public system; the same oversized request succeeds in 'main'. */
@Test
public void no_out_of_quota_outside_public() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.main,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
prepare(application, 2, 2, 6, 3, defaultResources, tester);
}
/**
 * With canFail=false (the last Capacity argument), preparing with insufficient capacity
 * must not throw — the deployment proceeds with what is available.
 */
@Test
public void out_of_capacity_but_cannot_fail() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources);
ApplicationId application = tester.makeApplicationId();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
/** Nodes flagged wantToRetire are not eligible for allocation, so the prepare runs out of capacity. */
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
/**
 * Nodes marked wantToRetire are only actually retired when the deployment is allowed to
 * fail (canFail=true); with canFail=false the flag is recorded but no node is retired.
 * Fix: renamed local 'capacityFORCED' to the conventional lowerCamelCase 'forcedCapacity'.
 */
@Test
public void want_to_retire_but_cannot_fail() {
    Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
    // canFail=false: the provisioner may not retire nodes since it cannot fall back to failing
    Capacity forcedCapacity = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    // Flag every active node for retirement, then redeploy without permission to fail
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    tester.activate(application, tester.prepare(application, cluster, forcedCapacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
    // With canFail=true again, the flagged nodes are finally retired
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
/** When shrinking content clusters from 4 to 2 nodes, the nodes with the highest indexes are the ones retired. */
@Test
public void highest_node_indexes_are_retired_first() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
// Indexes 0 and 1 survive; indexes 2 and 3 are retired in both content clusters
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
/** Redeploying an application retires exactly the nodes flagged wantToRetire, attributed to the system agent. */
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{
// Flag two of the four active nodes, redeploy, and verify those two are retired
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
/**
 * A failed prepare keeps its partial reservation; a later prepare extends it, refreshes the
 * reservation timestamps (so the expirer does not release them), and activation succeeds.
 */
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// Enough capacity now exists; preparing again extends the existing partial reservation
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// 2min + 8min + 1s since the first reservation, but only ~8min since the refresh — under
// the 10min expiry, so the expirer must leave all four reservations intact
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
/** Even with required=true, a single-node cluster request in prod violates redundancy and is rejected. */
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
fail("Expected exception");
} catch (IllegalArgumentException ignored) {}
}
/** In the dev system, applications are provisioned on devhost-type hosts with dev-reduced sizes. */
@Test
public void devsystem_application_deployment_on_devhost() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Preparing again with a new wanted Vespa version updates the cluster spec on nodes already reserved. */
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
// Re-preparing (no activation in between) must rewrite the version on the same reserved nodes
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
/**
 * Switching a cluster between 'content' and 'combined' type keeps the same node allocation
 * in both directions; only the recorded cluster type changes.
 */
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
// content -> combined
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
// combined -> content
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Convenience overload: prepares with the default wanted version "6.42". */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources resources, ProvisioningTester tester) {
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size,
                   resources, "6.42");
}
/** Convenience overload: prepares as not-required and without a docker image repo. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources nodeResources, String wantedVersion) {
    boolean required = false;
    Optional<String> dockerImageRepo = Optional.empty();
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size,
                   required, nodeResources, wantedVersion, dockerImageRepo);
}
/**
 * Prepares two container and two content clusters of the given sizes and verifies the result:
 * the clusters are disjoint, each has the expected (policy-adjusted) number of unretired
 * nodes, and every host carries the right cluster membership.
 * Returns the prepared host sets wrapped in a SystemState for further assertions.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// Capacity policies may change the requested sizes per environment (dev/test/staging);
// compute the expected size of each cluster the same way the provisioner does
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
// allHosts is a Set, so the sum check also verifies the clusters do not share hosts
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster, returning its hosts; an empty set when no nodes are requested. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    return nodeCount == 0
           ? Collections.emptySet()
           : new HashSet<>(tester.prepare(application, cluster, nodeCount, groups, required, nodeResources));
}
/**
 * The host sets produced by one prepare call: all hosts plus the per-cluster subsets.
 * Provides assertions for comparing system states across prepare/activate cycles.
 */
private static class SystemState {

    // Fields are mutable sets on purpose: tests mutate them (removeAll/removeHost),
    // but the references themselves never change, so they are final.
    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    // Parameter names now match the fields they populate; the original named them
    // container1/container2, which silently shifted meaning relative to the fields.
    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    /** A host with no group counts as belonging to group 0. */
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group == 0;
        return clusterGroup.get().index() == group;
    }

    /** Returns the hostnames of all hosts in this state. */
    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes and returns the host with the given hostname from allHosts, or null if absent. */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this state contains all of another state's hosts with unchanged memberships. */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension,
                               Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts all host sets of this equal those of another state. Fully qualified to avoid clashing with JUnit's static import. */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }

}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // history().event(Type.retired) already selects the retired event, so the
    // original extra filter on e.type() == retired was a no-op and is dropped.
    return (node) -> node.history().event(History.Event.Type.retired)
                         .filter(e -> e.agent() == agent)
                         .isPresent();
}
} |
test code updated | public void dockerImageRepoIsReturnedIfSet() {
// NOTE(review): the 'test code updated |' prefix above and the '| ...' tail on the closing line
// are dataset-export column delimiters, not Java; left untouched.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
// No Vespa version has been recorded on the node yet, so the host carries none —
// presumably HostSpec.version() reflects the node's recorded version; confirm against HostSpec.
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
// Record a docker image on the node, then re-prepare: the image should now surface on the HostSpec.
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(DockerImage.fromString(dockerImageRepo))), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(dockerImageRepo, host1.dockerImageRepo().get());
} | tester.nodeRepository().write(node1.with(node1.status().withDockerImage(DockerImage.fromString(dockerImageRepo))), () -> {}); | public void dockerImageRepoIsReturnedIfSet() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
// NOTE(review): this is a second (updated) copy of dockerImageRepoIsReturnedIfSet from the
// dataset's method_body_after column; its method header sits after the '|' on the previous line.
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, tester, 1, 1, 1, 1, defaultResources, "1.2.3");
String dockerImageRepo = "docker.domain.tld/my/image";
prepare(application1, tester, 1, 1, 1 , 1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
// This version pins the full image (repo + tag) and asserts against the node's status
// rather than the HostSpec, unlike the copy above.
DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3"));
tester.nodeRepository().write(node1.with(node1.status().withDockerImage(dockerImage)), () -> {});
SystemState state2 = prepare(application1, tester, 1, 1, 1 ,1 , false, defaultResources, "1.2.3", Optional.of(dockerImageRepo));
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
node1 = tester.nodeRepository().getNode(host1.hostname()).get();
assertEquals(dockerImage, node1.status().dockerImage().get());
} | class ProvisioningTest {
// Default node resources used throughout these tests: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth.
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
public void application_deployment_constant_application_size() {
// Full deploy/redeploy lifecycle at constant cluster sizes: hosts must be stable across
// redeploys, disjunct between applications, and move correctly through
// active/inactive/reserved on node removal, application removal, and host failure.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
ApplicationId application2 = tester.makeApplicationId();
tester.makeReadyNodes(21, defaultResources);
// Initial deploy, then a redeploy which must return the same hosts.
SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state2.assertEquals(state1);
tester.activate(application1, state2.allHosts);
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
// Two prepares without an intervening activate still return the same hosts.
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state3.assertEquals(state2);
state4.assertEquals(state3);
tester.activate(application1, state4.allHosts);
// Remove a host from the prepared set before activating: it becomes inactive.
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
tester.activate(application1, state6.allHosts);
assertEquals(5, tester.getNodes(application1, Node.State.active).size());
assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
// Removing the application deactivates all of its nodes.
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, application1);
removeTransaction.commit();
assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
// Failing a host of application2 causes a replacement to be reserved on the next prepare.
HostSpec failed = tester.removeOne(state1App2.allHosts);
tester.fail(failed);
assertEquals(9, tester.getNodes(application2, Node.State.active).size());
SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
tester.activate(application2, state2App2.allHosts);
// Redeploying application1 gets its original (now inactive) hosts back.
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
// Restart with various filters and verify restart generations were bumped accordingly.
HostFilter allFilter = HostFilter.all();
HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
tester.provisioner().restart(application1, allFilter);
tester.provisioner().restart(application1, hostFilter);
tester.provisioner().restart(application1, clusterTypeFilter);
tester.provisioner().restart(application1, clusterIdFilter);
tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
public void nodeVersionIsReturnedIfSet() {
// HostSpec.version is empty until a Vespa version has been recorded on the node,
// after which subsequent prepares expose it.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application1 = tester.makeApplicationId();
SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
// Nothing written yet, so no version is present.
assertFalse(host1.version().isPresent());
Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
// Fixed: the original carried a duplicate @Test annotation, which does not compile
// (@Test is not a repeatable annotation type).
@Test
public void application_deployment_variable_application_size() {
    // Grows and shrinks cluster sizes across redeploys, verifying that surplus container
    // nodes are deactivated, surplus content nodes are retired, and retired/inactive
    // nodes are reused or unretired when sizes grow again.
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(24, defaultResources);
    // Initial deploy.
    SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // Grow: the new state extends the old, and the extra nodes are reserved.
    SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
    state2.assertExtends(state1);
    assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
    tester.activate(application1, state2.allHosts);
    // Shrink back: container surplus deactivates, content surplus retires.
    SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
    assertEquals("Superfluous container nodes are deactivated",
                 3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    // Grow beyond previous max: inactive nodes are reused; retirement is resolved on activate.
    SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
    assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Earlier retired nodes are not unretired before activate",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    state4.assertExtends(state2);
    assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
    // Remove a retired host from the prepared set before activating.
    HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
    tester.activate(application1, state4.allHosts);
    assertEquals("Retired active removed when activating became inactive",
                 1, tester.getNodes(application1, Node.State.inactive).asList().size());
    assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
    assertEquals("Earlier retired nodes are unretired on activate",
                 0, tester.getNodes(application1, Node.State.active).retired().size());
    // Shrink again.
    SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state5.allHosts);
    assertEquals("Superfluous container nodes are also deactivated",
                 4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Grow content0 by one: one node is unretired.
    SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
    tester.activate(application1, state6.allHosts);
    assertEquals("One content node is unretired",
                 5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Remove the application: no active or reserved nodes remain.
    SystemState state7 = prepare(application1, 8, 2, 2, 2, defaultResources, tester);
    NestedTransaction removeTransaction = new NestedTransaction();
    tester.provisioner().remove(removeTransaction, application1);
    removeTransaction.commit();
    assertEquals(0, tester.getNodes(application1, Node.State.active).size());
    assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
// Redeploying with a different flavor swaps the whole cluster: the old-flavor nodes
// are retired (both container and content) and none of the new-flavor nodes are.
NodeResources small = new NodeResources(1, 4, 10, 0.3);
NodeResources large = new NodeResources(8, 8, 40, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(12, small);
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
tester.activate(application1, state2.allHosts);
tester.makeReadyNodes(16, large);
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
assertEquals("'small content nodes are retired",
4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
assertEquals("No large content nodes are retired",
0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
    // Preparing beyond available capacity throws, but a subsequent prepare at
    // capacity still succeeds.
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(5, defaultResources);
    // Deploy at capacity.
    SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    try {
        // Dropped the original's unused local: prepare is expected to throw here.
        prepare(application1, 3, 0, 3, 0, defaultResources, tester);
        fail("Expected out of capacity exception");
    }
    catch (OutOfCapacityException expected) {
        // Requested 6 nodes with only 5 ready: the failure is the point of this branch.
    }
    // Deploy at capacity again: succeeds.
    SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
    // In dev, requested cluster sizes collapse to one node per cluster: 2+2+3+3 requested, 4 allocated.
    Zone devZone = new Zone(Environment.dev, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(devZone).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
// The resources the application asked for (including the disk-speed policy) are
// retained on both the returned HostSpecs and the nodes' allocations.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
{
// justNumbers() drops the disk-speed requirement, so 'any' must be recorded.
SystemState state = prepare(application, 0, 0, 3, 3,
defaultResources.justNumbers(),
tester);
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
// Full defaultResources requests fast disk, so 'fast' must be recorded.
SystemState state = prepare(application, 0, 0, 5, 3,
defaultResources,
tester);
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
}
@Test
public void deploy_specific_vespa_version() {
    // Preparing with an explicit Vespa version succeeds and yields the dev-collapsed four hosts.
    Zone devZone = new Zone(Environment.dev, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(devZone).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    SystemState state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
    // Preparing with both an explicit Vespa version and a docker image repo succeeds.
    Zone devZone = new Zone(Environment.dev, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(devZone).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    Optional<String> imageRepo = Optional.of("docker.domain.tld/my/image");
    SystemState state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", imageRepo);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void test_deployment_size() {
    // In the test environment, cluster sizes collapse to one node each: 4 hosts total.
    Zone testZone = new Zone(Environment.test, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(testZone).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
    // A prod cluster of size 1 violates the redundancy requirement and must be rejected.
    Zone prodZone = new Zone(Environment.prod, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(prodZone).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
// As in dev_deployment_node_size, sizes collapse to one node per cluster in dev.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3,
new NodeResources(2, 4, 10, 2), tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
// NOTE(review): the local is named 'large' but holds modest resources; presumably
// copied from another test — consider renaming.
NodeResources large = new NodeResources(2, 4, 10, 0.3);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(4, large);
SystemState state = prepare(application, 2, 2, 3, 3, large, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void staging_deployment_size() {
    // Staging caps large clusters: 1+1+1+64 requested collapses to 9 hosts.
    Zone stagingZone = new Zone(Environment.staging, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(stagingZone).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(14, defaultResources);
    SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
    assertEquals(9, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void activate_after_reservation_timeout() {
// Activating after the reservation has been deactivated must fail with a clear message.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(10, defaultResources);
ApplicationId application = tester.makeApplicationId();
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
// Simulate the reservation being released before activation.
NestedTransaction deactivateTransaction = new NestedTransaction();
tester.nodeRepository().deactivate(application, deactivateTransaction);
deactivateTransaction.commit();
try {
tester.activate(application, state.allHosts);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
}
}
@Test
public void out_of_capacity() {
    // Requesting 10 nodes with only 9 ready must throw OutOfCapacityException.
    Zone prodZone = new Zone(Environment.prod, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(prodZone).build();
    tester.makeReadyNodes(9, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    try {
        prepare(application, 2, 2, 3, 3, defaultResources, tester);
        fail("Expected exception");
    }
    catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
@Test
public void no_out_of_quota_outside_public() {
    // The same request that trips the quota in the public system succeeds in SystemName.main.
    Zone mainProd = new Zone(SystemName.main,
                             Environment.prod,
                             RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(mainProd).build();
    tester.makeReadyNodes(13, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    prepare(application, 2, 2, 6, 3, defaultResources, tester);
}
@Test
public void out_of_capacity_but_cannot_fail() {
// With canFail=false (the last Capacity argument), preparing 5 nodes on 4 ready
// must not throw even though capacity is insufficient.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(4, defaultResources);
ApplicationId application = tester.makeApplicationId();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
// Nodes flagged wantToRetire are not eligible for allocation, so this is out of capacity
// even though enough nodes are nominally ready.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException e) {
assertTrue(e.getMessage().startsWith("Could not satisfy request"));
}
}
@Test
public void want_to_retire_but_cannot_fail() {
    // When deployment cannot fail (canFail=false), wantToRetire nodes stay unretired;
    // once a normal (canFail=true) deployment runs, they are retired and replaced.
    Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
    // Renamed from 'capacityFORCED' to follow lowerCamelCase naming.
    Capacity forcedCapacity = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    // Normal deploy: 5 active, none retired.
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    // Mark all active nodes wantToRetire, then deploy with canFail=false: nothing retires.
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    tester.activate(application, tester.prepare(application, cluster, forcedCapacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
    // Deploy normally: the 5 wantToRetire nodes retire and 5 replacements go active.
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
public void highest_node_indexes_are_retired_first() {
// Shrinking content clusters from 4 to 2 nodes must retire indexes 2 and 3, keeping 0 and 1.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyNodes(14, defaultResources);
SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
tester.activate(application1, state1.allHosts);
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
// Active nodes flagged wantToRetire are retired (by Agent.system) on the next deployment.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(10, defaultResources);
{
SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
{
// Flag the first two active nodes, then redeploy at the same size.
List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
tester.activate(application, state.allHosts);
List<Node> retiredNodes = tester.getNodes(application).retired().asList();
assertEquals(2, retiredNodes.size());
assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
}
}
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
// A failed (partial) prepare keeps its reservations; a later prepare refreshes their
// reservation timestamps so the ReservationExpirer does not release them.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
tester.makeReadyNodes(2, defaultResources);
try {
prepare(application, 2, 0, 2, 0, defaultResources, tester);
fail("Expected exception");
} catch (OutOfCapacityException ignored) {}
assertEquals("Reserved a subset of required nodes", 2,
tester.getNodes(application, Node.State.reserved).size());
// Add the missing capacity and prepare again after some time has passed.
tester.makeReadyNodes(2, defaultResources);
tester.clock().advance(Duration.ofMinutes(2));
SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
assertEquals("Reserved required nodes", 4, reserved.size());
assertTrue("Time of event is updated for all nodes",
reserved.stream()
.allMatch(n -> n.history()
.event(History.Event.Type.reserved)
.get().at()
.equals(tester.clock().instant().truncatedTo(MILLIS))));
// Advance past the original reservation's age; the refreshed timestamps keep them reserved.
tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
Duration.ofMinutes(10));
expirer.run();
assertEquals("Nodes remain reserved", 4,
tester.getNodes(application, Node.State.reserved).size());
tester.activate(application, state.allHosts);
assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
    // Even with required=true, single-node prod clusters violate redundancy and are rejected.
    Zone prodZone = new Zone(Environment.prod, RegionName.from("us-east"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(prodZone).build();
    ApplicationId application = tester.makeApplicationId();
    try {
        prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
        fail("Expected exception");
    } catch (IllegalArgumentException ignored) {}
}
@Test
public void devsystem_application_deployment_on_devhost() {
    // In the dev system, applications deploy onto devhost nodes with dev-collapsed sizes.
    Zone devSystemZone = new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"));
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(devSystemZone).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
    ApplicationId application = tester.makeApplicationId();
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
// Preparing again with a new Vespa version updates the cluster spec on nodes that
// are still reserved from the previous prepare.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
    var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    var application = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);
    // Deploy as a content cluster first
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
    var initialNodes = tester.activate(application, tester.prepare(application, cluster,
                                                                   Capacity.fromCount(2, defaultResources, false, false),
                                                                   1));
    // Switch the same cluster id to type combined: allocation is kept, only the cluster type changes
    cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
    var newNodes = tester.activate(application, tester.prepare(application, cluster,
                                                               Capacity.fromCount(2, defaultResources, false, false),
                                                               1));
    assertEquals("Node allocation remains the same", initialNodes, newNodes);
    assertEquals("Cluster type is updated",
                 Set.of(ClusterSpec.Type.combined),
                 newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
    // Switch back to content: still the same nodes
    cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
    newNodes = tester.activate(application, tester.prepare(application, cluster,
                                                           Capacity.fromCount(2, defaultResources, false, false),
                                                           1));
    assertEquals("Node allocation remains the same", initialNodes, newNodes);
    assertEquals("Cluster type is updated",
                 Set.of(ClusterSpec.Type.content),
                 newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Prepares four clusters with the default Vespa version 6.42. */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources flavor, ProvisioningTester tester) {
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42");
}
/** Prepares four clusters with required=false and no explicit docker image repo. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, NodeResources nodeResources, String wantedVersion) {
    return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, false, nodeResources,
                   wantedVersion, Optional.empty());
}
/**
 * Prepares two container and two content clusters of the given sizes, verifies that the prepared host
 * sets are disjoint and match the sizes decided by the zone's capacity policies, and returns a
 * SystemState snapshot of all host sets.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
                            int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
    // NOTE(review): dockerImageRepo is not used in this body — confirm whether it should be applied to the cluster specs
    ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
    ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
    ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
    ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
    Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
    Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
    Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
    Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
    Set<HostSpec> allHosts = new HashSet<>();
    allHosts.addAll(container0);
    allHosts.addAll(container1);
    allHosts.addAll(content0);
    allHosts.addAll(content1);
    // Expected sizes after the zone's capacity policies (e.g. dev/test reductions) have been applied
    Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
    int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
    int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
    int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
    int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
    assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
                 expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
                 tester.nonRetired(allHosts).size());
    assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
    assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
    assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
    assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
    tester.assertMembersOf(containerCluster0, container0);
    tester.assertMembersOf(containerCluster1, container1);
    tester.assertMembersOf(contentCluster0, content0);
    tester.assertMembersOf(contentCluster1, content1);
    return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster and returns the prepared hosts as a set; an empty set when no nodes are requested. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
                              boolean required, NodeResources nodeResources) {
    if (nodeCount != 0) {
        List<HostSpec> prepared = tester.prepare(application, cluster, nodeCount, groups, required, nodeResources);
        return new HashSet<>(prepared);
    }
    return Collections.emptySet();
}
/**
 * Snapshot of the host sets produced by a multi-cluster prepare call: all hosts, plus the hosts of
 * each of the four clusters (container0, container1, content0, content1).
 */
private static class SystemState {

    // Fields are final; the sets themselves remain mutable so tests can remove hosts from them.
    private final Set<HostSpec> allHosts;
    private final Set<HostSpec> container0;
    private final Set<HostSpec> container1;
    private final Set<HostSpec> content0;
    private final Set<HostSpec> content1;

    // Parameters renamed from the misleading container1/container2 (which populated container0/container1)
    // so each parameter name matches the field it initializes. Call sites are positional, so behavior is unchanged.
    public SystemState(Set<HostSpec> allHosts,
                       Set<HostSpec> container0,
                       Set<HostSpec> container1,
                       Set<HostSpec> content0,
                       Set<HostSpec> content1) {
        this.allHosts = allHosts;
        this.container0 = container0;
        this.container1 = container1;
        this.content0 = content0;
        this.content1 = content1;
    }

    /** Returns a host by cluster name and index, or null if there is no host with the given values in this */
    public HostSpec hostByMembership(String clusterId, int group, int index) {
        for (HostSpec host : allHosts) {
            if ( ! host.membership().isPresent()) continue;
            ClusterMembership membership = host.membership().get();
            if (membership.cluster().id().value().equals(clusterId) &&
                groupMatches(membership.cluster().group(), group) &&
                membership.index() == index)
                return host;
        }
        return null;
    }

    /** A host without an explicit group matches group 0 */
    private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
        if ( ! clusterGroup.isPresent()) return group == 0;
        return clusterGroup.get().index() == group;
    }

    /** Returns the hostnames of all hosts in this state */
    public Set<String> hostNames() {
        return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
    }

    /** Removes the host with the given hostname from allHosts and returns it, or null if not present */
    public HostSpec removeHost(String hostname) {
        for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
            HostSpec host = i.next();
            if (host.hostname().equals(hostname)) {
                i.remove();
                return host;
            }
        }
        return null;
    }

    /** Asserts that this state contains every host of the other state, with unchanged cluster memberships */
    public void assertExtends(SystemState other) {
        assertTrue(this.allHosts.containsAll(other.allHosts));
        assertExtends(this.container0, other.container0);
        assertExtends(this.container1, other.container1);
        assertExtends(this.content0, other.content0);
        assertExtends(this.content1, other.content1);
    }

    private void assertExtends(Set<HostSpec> extension, Set<HostSpec> original) {
        for (HostSpec originalHost : original) {
            HostSpec newHost = findHost(originalHost.hostname(), extension);
            // An original host missing from the extension fails here with an NPE-backed assertion
            org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
        }
    }

    private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
        for (HostSpec host : hosts)
            if (host.hostname().equals(hostName))
                return host;
        return null;
    }

    /** Asserts all five host sets are equal to the other state's */
    public void assertEquals(SystemState other) {
        org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
        org.junit.Assert.assertEquals(this.container0, other.container0);
        org.junit.Assert.assertEquals(this.container1, other.container1);
        org.junit.Assert.assertEquals(this.content0, other.content0);
        org.junit.Assert.assertEquals(this.content1, other.content1);
    }
}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
    // The type filter on the event is redundant: history().event(Type.retired) already selects the
    // retired event (assumes event(type) only returns events of that type — TODO confirm), so only
    // the agent needs checking.
    return (node) -> node.history().event(History.Event.Type.retired)
                         .filter(e -> e.agent() == agent)
                         .isPresent();
}
} | class ProvisioningTest {
// Default node resources used throughout these tests: 1 vcpu, 4 Gb memory, 10 Gb disk, 4 Gbps bandwidth
private static final NodeResources defaultResources = new NodeResources(1, 4, 10, 4);
@Test
public void application_deployment_constant_application_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    ApplicationId application2 = tester.makeApplicationId();
    tester.makeReadyNodes(21, defaultResources);
    // Initial deploy
    SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // Redeploy with the same size: identical allocation
    SystemState state2 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    state2.assertEquals(state1);
    tester.activate(application1, state2.allHosts);
    // A second application gets hosts disjoint from the first
    SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
    assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
    tester.activate(application2, state1App2.allHosts);
    // Preparing twice without activating in between keeps the allocation stable
    SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    SystemState state4 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    state3.assertEquals(state2);
    state4.assertEquals(state3);
    tester.activate(application1, state4.allHosts);
    // A host removed from the prepared set before activation becomes inactive
    SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    HostSpec removed = tester.removeOne(state5.allHosts);
    tester.activate(application1, state5.allHosts);
    assertEquals(removed.hostname(), tester.nodeRepository().getNodes(application1, Node.State.inactive).get(0).hostname());
    // Shrinking to two of the four clusters deactivates the surplus nodes
    SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
    tester.activate(application1, state6.allHosts);
    assertEquals(5, tester.getNodes(application1, Node.State.active).size());
    assertEquals(5, tester.getNodes(application1, Node.State.inactive).size());
    // Removing the application deactivates all its nodes
    NestedTransaction removeTransaction = new NestedTransaction();
    tester.provisioner().remove(removeTransaction, application1);
    removeTransaction.commit();
    assertEquals(tester.toHostNames(state1.allHosts), tester.toHostNames(tester.nodeRepository().getNodes(application1, Node.State.inactive)));
    assertEquals(0, tester.getNodes(application1, Node.State.active).size());
    assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().getNodes(application2, Node.State.active)));
    // Failing a node of application2: a fresh replacement is reserved on the next prepare
    HostSpec failed = tester.removeOne(state1App2.allHosts);
    tester.fail(failed);
    assertEquals(9, tester.getNodes(application2, Node.State.active).size());
    SystemState state2App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
    assertFalse("Hosts to different apps are disjunct", state2App2.allHosts.removeAll(state1.allHosts));
    assertEquals("A new node was reserved to replace the failed one", 10, state2App2.allHosts.size());
    assertFalse("The new host is not the failed one", state2App2.allHosts.contains(failed));
    tester.activate(application2, state2App2.allHosts);
    // Redeploying application1 reuses its inactive nodes
    SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    state7.assertEquals(state1);
    tester.activate(application1, state7.allHosts);
    assertEquals(0, tester.getNodes(application1, Node.State.inactive).size());
    // Restart with the various host filters and verify the restart counters
    HostFilter allFilter = HostFilter.all();
    HostFilter hostFilter = HostFilter.hostname(state6.allHosts.iterator().next().hostname());
    HostFilter clusterTypeFilter = HostFilter.clusterType(ClusterSpec.Type.container);
    HostFilter clusterIdFilter = HostFilter.clusterId(ClusterSpec.Id.from("container1"));
    tester.provisioner().restart(application1, allFilter);
    tester.provisioner().restart(application1, hostFilter);
    tester.provisioner().restart(application1, clusterTypeFilter);
    tester.provisioner().restart(application1, clusterIdFilter);
    tester.assertRestartCount(application1, allFilter, hostFilter, clusterTypeFilter, clusterIdFilter);
}
@Test
public void nodeVersionIsReturnedIfSet() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application1 = tester.makeApplicationId();
    SystemState state1 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // No Vespa version has been written to the node yet, so the host spec carries none
    HostSpec host1 = state1.container0.iterator().next();
    assertFalse(host1.version().isPresent());
    // After writing a version to the node's status, the next prepare returns it on the host spec
    Node node1 = tester.nodeRepository().getNode(host1.hostname()).get();
    tester.nodeRepository().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
    SystemState state2 = prepare(application1, 1, 1, 1, 1, defaultResources, tester);
    tester.activate(application1, state2.allHosts);
    host1 = state2.container0.iterator().next();
    assertEquals(Version.fromString("1.2.3"), host1.version().get());
}
@Test
@Test
public void application_deployment_variable_application_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(24, defaultResources);
    // Initial deploy
    SystemState state1 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // Grow: new nodes are reserved on top of the existing allocation
    SystemState state2 = prepare(application1, 3, 4, 4, 5, defaultResources, tester);
    state2.assertExtends(state1);
    assertEquals("New nodes are reserved", 6, tester.getNodes(application1, Node.State.reserved).size());
    tester.activate(application1, state2.allHosts);
    // Shrink: surplus container nodes go inactive, surplus content nodes retire (data must migrate first)
    SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
    assertEquals("Superfluous container nodes are deactivated",
                 3-2 + 4-2, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    // Grow again: inactive nodes are reused, earlier retirements stay until activate
    SystemState state4 = prepare(application1, 4, 5, 5, 6, defaultResources, tester);
    assertEquals("Inactive nodes are reused", 0, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Earlier retired nodes are not unretired before activate",
                 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
    state4.assertExtends(state2);
    assertEquals("New and inactive nodes are reserved", 4 + 3, tester.getNodes(application1, Node.State.reserved).size());
    // Removing a retired host from the prepared set makes it inactive on activate; the rest unretire
    HostSpec removed = state4.removeHost(tester.getNodes(application1, Node.State.active).retired().asList().get(0).hostname());
    tester.activate(application1, state4.allHosts);
    assertEquals("Retired active removed when activating became inactive",
                 1, tester.getNodes(application1, Node.State.inactive).asList().size());
    assertEquals(removed.hostname(), tester.getNodes(application1, Node.State.inactive).asList().get(0).hostname());
    assertEquals("Earlier retired nodes are unretired on activate",
                 0, tester.getNodes(application1, Node.State.active).retired().size());
    // Shrink again
    SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
    tester.activate(application1, state5.allHosts);
    assertEquals("Superfluous container nodes are also deactivated",
                 4-2 + 5-2 + 1, tester.getNodes(application1, Node.State.inactive).size());
    assertEquals("Superfluous content nodes are retired",
                 5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Growing content0 by one unretires a node
    SystemState state6 = prepare(application1, 2, 2, 4, 3, defaultResources, tester);
    tester.activate(application1, state6.allHosts);
    assertEquals("One content node is unretired",
                 5-4 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
    // Removing the application releases all active and reserved nodes
    SystemState state7 = prepare(application1, 8, 2, 2, 2, defaultResources, tester);
    NestedTransaction removeTransaction = new NestedTransaction();
    tester.provisioner().remove(removeTransaction, application1);
    removeTransaction.commit();
    assertEquals(0, tester.getNodes(application1, Node.State.active).size());
    assertEquals(0, tester.getNodes(application1, Node.State.reserved).size());
}
@Test
public void application_deployment_multiple_flavors() {
    NodeResources small = new NodeResources(1, 4, 10, 0.3);
    NodeResources large = new NodeResources(8, 8, 40, 0.3);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(12, small);
    // Deploy on small nodes
    SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
    tester.activate(application1, state1.allHosts);
    SystemState state2 = prepare(application1, 2, 2, 3, 3, small, tester);
    tester.activate(application1, state2.allHosts);
    // Requesting large nodes swaps the whole cluster: all small nodes retire, large ones take over
    tester.makeReadyNodes(16, large);
    SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
    assertEquals("New nodes are reserved", 16, tester.nodeRepository().getNodes(application1, Node.State.reserved).size());
    tester.activate(application1, state3.allHosts);
    assertEquals("small container nodes are retired because we are swapping the entire cluster",
                 2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
    assertEquals("'small content nodes are retired",
                 4 + 4, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.content).resources(small).size());
    assertEquals("No large content nodes are retired",
                 0, tester.getNodes(application1, Node.State.active).retired().resources(large).size());
}
@Test
public void application_deployment_above_then_at_capacity_limit() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(5, defaultResources);
    // Deploy at exact capacity: 2 container + 3 content nodes fit the 5 ready nodes
    SystemState state1 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    // Requesting one node more than available fails (unused result of the failing prepare dropped)
    try {
        prepare(application1, 3, 0, 3, 0, defaultResources, tester);
        fail("Expected out of capacity exception");
    }
    catch (OutOfCapacityException expected) {
    }
    // Deploying again at the original size still succeeds
    SystemState state3 = prepare(application1, 2, 0, 3, 0, defaultResources, tester);
    tester.activate(application1, state3.allHosts);
}
@Test
public void dev_deployment_node_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    // In dev the requested 2+2+3+3 nodes are reduced to the 4 available hosts
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void requested_resources_info_is_retained() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(13, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    {
        // Requesting resources without a disk-speed preference (justNumbers) records DiskSpeed.any
        SystemState state = prepare(application, 0, 0, 3, 3,
                                    defaultResources.justNumbers(),
                                    tester);
        assertEquals(6, state.allHosts.size());
        tester.activate(application, state.allHosts);
        assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
        assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
    }
    {
        // Requesting the full default resources records DiskSpeed.fast on hosts and allocations
        SystemState state = prepare(application, 0, 0, 5, 3,
                                    defaultResources,
                                    tester);
        assertEquals(8, state.allHosts.size());
        tester.activate(application, state.allHosts);
        assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
        assertTrue(tester.nodeRepository().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
    }
}
@Test
public void deploy_specific_vespa_version() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    // Prepare with an explicit wanted Vespa version instead of the default
    SystemState state = prepare(application, tester, 2, 2, 3, 3, defaultResources, "6.91");
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void deploy_specific_vespa_version_and_docker_image() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources, NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    // Prepare with both an explicit Vespa version and an explicit docker image repo
    String dockerImageRepo = "docker.domain.tld/my/image";
    SystemState state = prepare(application, tester, 2, 2, 3, 3, false, defaultResources, "6.91", Optional.of(dockerImageRepo));
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void test_deployment_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(4, defaultResources);
    // In test environments the requested 2+2+3+3 nodes are reduced to 4
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    // container0 size 1 violates prod redundancy even though capacity is plentiful
    prepare(application, 1, 2, 3, 3, defaultResources, tester);
}
/** Dev always uses the zone default flavor */
@Test
public void dev_deployment_flavor() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, new NodeResources(2, 4, 10, 2), NodeType.host, 1);
    tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.host);
    ApplicationId application = tester.makeApplicationId();
    // Request matches the host resources; dev reduces the node count to the 4 hosts
    SystemState state = prepare(application, 2, 2, 3, 3,
                                new NodeResources(2, 4, 10, 2), tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
/** Test always uses the zone default resources */
@Test
public void test_deployment_resources() {
    // NOTE(review): the local is named 'large' but (2, 4, 10, 0.3) is modest — consider renaming
    NodeResources large = new NodeResources(2, 4, 10, 0.3);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.test, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(4, large);
    SystemState state = prepare(application, 2, 2, 3, 3, large, tester);
    assertEquals(4, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void staging_deployment_size() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.staging, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(14, defaultResources);
    // Staging caps cluster sizes: 1+1+1+64 requested nodes become 9
    SystemState state = prepare(application, 1, 1, 1, 64, defaultResources, tester);
    assertEquals(9, state.allHosts.size());
    tester.activate(application, state.allHosts);
}
@Test
public void activate_after_reservation_timeout() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(10, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
    // Deactivate (simulating reservation expiry) before the prepared hosts are activated
    NestedTransaction deactivateTransaction = new NestedTransaction();
    tester.nodeRepository().deactivate(application, deactivateTransaction);
    deactivateTransaction.commit();
    // Activation of the stale prepared hosts must now fail
    try {
        tester.activate(application, state.allHosts);
        fail("Expected exception");
    }
    catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().startsWith("Activation of " + application + " failed"));
    }
}
@Test
public void out_of_capacity() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    // 9 ready nodes cannot satisfy the 10 requested (2+2+3+3)
    tester.makeReadyNodes(9, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    try {
        prepare(application, 2, 2, 3, 3, defaultResources, tester);
        fail("Expected exception");
    }
    catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void out_of_quota() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.Public,
Environment.prod,
RegionName.from("us-east"))).build();
tester.makeReadyNodes(13, defaultResources);
ApplicationId application = tester.makeApplicationId();
try {
prepare(application, 2, 2, 6, 3, defaultResources, tester);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("6 nodes [vcpu: 1.0, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 4.0 Gbps] requested for content cluster 'content0' 6.42 exceeds your quota. Resolve this at https:
e.getMessage());
}
}
@Test
public void no_out_of_quota_outside_public() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.main,
                                                                               Environment.prod,
                                                                               RegionName.from("us-east"))).build();
    tester.makeReadyNodes(13, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    // The same request that exceeds quota in the public system succeeds in system 'main'
    prepare(application, 2, 2, 6, 3, defaultResources, tester);
}
@Test
public void out_of_capacity_but_cannot_fail() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    tester.makeReadyNodes(4, defaultResources);
    ApplicationId application = tester.makeApplicationId();
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    // canFail=false: requesting 5 nodes with only 4 available must not throw
    tester.prepare(application, cluster, Capacity.fromCount(5, Optional.empty(), false, false), 1);
}
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    // Mark every ready node as wanting to retire: none are eligible for allocation
    List<Node> readyNodes = tester.makeReadyNodes(5, defaultResources);
    readyNodes.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    try {
        prepare(application, 2, 0, 2, 0, defaultResources, tester);
        fail("Expected exception");
    } catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
@Test
public void want_to_retire_but_cannot_fail() {
    // Same capacity, differing only in canFail (last argument)
    Capacity capacity = Capacity.fromCount(5, Optional.of(defaultResources), false, true);
    Capacity capacityFORCED = Capacity.fromCount(5, Optional.of(defaultResources), false, false);
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    // All active nodes want to retire; with canFail=false no retirement happens on redeploy
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
    tester.activate(application, tester.prepare(application, cluster, capacityFORCED, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(0, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
    tester.nodeRepository().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
    // With canFail=true the wanting-to-retire nodes are retired and replacements allocated
    tester.activate(application, tester.prepare(application, cluster, capacity, 1));
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).not().retired().size());
    assertEquals(5, NodeList.copyOf(tester.nodeRepository().getNodes(application, Node.State.active)).retired().size());
}
@Test
public void highest_node_indexes_are_retired_first() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(14, defaultResources);
    // Deploy, then shrink every cluster: the nodes with the highest indexes should retire
    SystemState state1 = prepare(application1, 3, 3, 4, 4, defaultResources, tester);
    tester.activate(application1, state1.allHosts);
    SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
    tester.activate(application1, state2.allHosts);
    // Indexes 0 and 1 survive; indexes 2 and 3 are retired in both content clusters
    assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
    assertFalse( state2.hostByMembership("content0", 0, 1).membership().get().retired());
    assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
    assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
    assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
    assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
    assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
    assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
}
@Test
public void application_deployment_retires_nodes_that_want_to_retire() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(10, defaultResources);
    {
        // Deploy
        SystemState state = prepare(application, 2, 0, 2, 0,defaultResources, tester);
        tester.activate(application, state.allHosts);
        assertEquals(4, tester.getNodes(application, Node.State.active).size());
    }
    {
        // Mark two active nodes as wanting to retire; the next deploy retires them (by the system agent)
        List<Node> nodesToRetire = tester.getNodes(application, Node.State.active).asList().subList(0, 2);
        nodesToRetire.forEach(node -> tester.patchNode(node.with(node.status().withWantToRetire(true))));
        SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
        tester.activate(application, state.allHosts);
        List<Node> retiredNodes = tester.getNodes(application).retired().asList();
        assertEquals(2, retiredNodes.size());
        assertTrue("Nodes are retired by system", retiredNodes.stream().allMatch(retiredBy(Agent.system)));
    }
}
@Test
public void application_deployment_extends_existing_reservations_on_deploy() {
    ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
    ApplicationId application = tester.makeApplicationId();
    tester.makeReadyNodes(2, defaultResources);
    // Only 2 of the 4 requested nodes exist: prepare fails but keeps a partial reservation
    try {
        prepare(application, 2, 0, 2, 0, defaultResources, tester);
        fail("Expected exception");
    } catch (OutOfCapacityException ignored) {}
    assertEquals("Reserved a subset of required nodes", 2,
                 tester.getNodes(application, Node.State.reserved).size());
    // With 2 more ready nodes a later prepare completes and refreshes the reservation timestamps
    tester.makeReadyNodes(2, defaultResources);
    tester.clock().advance(Duration.ofMinutes(2));
    SystemState state = prepare(application, 2, 0, 2, 0, defaultResources, tester);
    List<Node> reserved = tester.getNodes(application, Node.State.reserved).asList();
    assertEquals("Reserved required nodes", 4, reserved.size());
    assertTrue("Time of event is updated for all nodes",
               reserved.stream()
                       .allMatch(n -> n.history()
                                       .event(History.Event.Type.reserved)
                                       .get().at()
                                       .equals(tester.clock().instant().truncatedTo(MILLIS))));
    // Advancing less than the expiry period past the refreshed reservation keeps the nodes reserved
    tester.clock().advance(Duration.ofMinutes(8).plus(Duration.ofSeconds(1)));
    ReservationExpirer expirer = new ReservationExpirer(tester.nodeRepository(), tester.clock(),
                                                        Duration.ofMinutes(10));
    expirer.run();
    assertEquals("Nodes remain reserved", 4,
                 tester.getNodes(application, Node.State.reserved).size());
    tester.activate(application, state.allHosts);
    assertEquals(4, tester.getNodes(application, Node.State.active).size());
}
@Test
public void required_capacity_respects_prod_redundancy_requirement() {
// A required single-node-per-cluster deployment must be rejected in a prod zone.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
boolean rejected = false;
try {
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, "6.42", Optional.empty());
} catch (IllegalArgumentException expected) {
rejected = true;
}
assertTrue("Required single-node prod deployment is rejected", rejected);
}
@Test
public void devsystem_application_deployment_on_devhost() {
// In the dev system, tenant nodes are allocated on 'devhost' type hosts.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.dev, Environment.dev, RegionName.from("no-central"))).build();
tester.makeReadyNodes(4, defaultResources, NodeType.devhost, 1);
tester.prepareAndActivateInfraApplication(tester.makeApplicationId(), NodeType.devhost);
ApplicationId application = tester.makeApplicationId();
// The requested 2+2+3+3 nodes end up as 4 hosts (dev capacity policies downscale —
// see the size assertions in prepare())
SystemState state = prepare(application, 2, 2, 3, 3, defaultResources, tester);
assertEquals(4, state.allHosts.size());
tester.activate(application, state.allHosts);
}
@Test
public void cluster_spec_update_for_already_reserved_nodes() {
// A second prepare with a new wanted version must update the cluster spec
// (vespa version) on nodes that are still in the reserved state.
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).build();
ApplicationId application = tester.makeApplicationId();
String version1 = "6.42";
String version2 = "6.43";
tester.makeReadyNodes(2, defaultResources);
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version1, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version1), node.allocation().get().membership().cluster().vespaVersion()));
// Re-prepare with a newer version while the nodes are still reserved
prepare(application, tester, 1, 0, 1, 0, true, defaultResources, version2, Optional.empty());
tester.getNodes(application, Node.State.reserved).forEach(node ->
assertEquals(Version.fromString(version2), node.allocation().get().membership().cluster().vespaVersion()));
}
@Test
public void change_to_and_from_combined_cluster_does_not_change_node_allocation() {
// Switching a cluster between content and combined types must keep the exact same
// node allocation while updating the cluster type on the memberships.
var tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
var application = tester.makeApplicationId();
tester.makeReadyNodes(4, defaultResources);
// Deploy as a content cluster
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var initialNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
// Redeploy as a combined cluster: same nodes, updated type
cluster = ClusterSpec.request(ClusterSpec.Type.combined, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
var newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.combined),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
// And back to content: still the same nodes
cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
newNodes = tester.activate(application, tester.prepare(application, cluster,
Capacity.fromCount(2, defaultResources, false, false),
1));
assertEquals("Node allocation remains the same", initialNodes, newNodes);
assertEquals("Cluster type is updated",
Set.of(ClusterSpec.Type.content),
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
/** Convenience overload of prepare() that defaults the wanted Vespa version. */
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources flavor, ProvisioningTester tester) {
String defaultWantedVersion = "6.42";
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, defaultWantedVersion);
}
/** Convenience overload of prepare() with required=false and no explicit docker image repo. */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources nodeResources, String wantedVersion) {
boolean required = false;
Optional<String> dockerImageRepo = Optional.empty();
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, required, nodeResources, wantedVersion, dockerImageRepo);
}
/**
 * Prepares four clusters (container0/1, content0/1) of the given sizes for the application and
 * asserts that the returned host sets are disjoint, correctly sized after capacity policies,
 * and labeled with the right cluster memberships.
 *
 * Note: the dockerImageRepo parameter is not referenced anywhere in this body —
 * TODO(review): either pass it to the cluster specs or remove it from the signature.
 */
private SystemState prepare(ApplicationId application, ProvisioningTester tester, int container0Size, int container1Size, int content0Size,
int content1Size, boolean required, NodeResources nodeResources, String wantedVersion, Optional<String> dockerImageRepo) {
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0")).vespaVersion(wantedVersion).build();
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0")).vespaVersion(wantedVersion).build();
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion(wantedVersion).build();
// Prepare each cluster separately (one group each)
Set<HostSpec> container0 = prepare(application, tester, containerCluster0, container0Size, 1, required, nodeResources);
Set<HostSpec> container1 = prepare(application, tester, containerCluster1, container1Size, 1, required, nodeResources);
Set<HostSpec> content0 = prepare(application, tester, contentCluster0, content0Size, 1, required, nodeResources);
Set<HostSpec> content1 = prepare(application, tester, contentCluster1, content1Size, 1, required, nodeResources);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
// Expected sizes are computed through the same capacity policies the allocator applies
// (e.g. downscaling in non-prod environments)
Function<Integer, Capacity> capacity = count -> Capacity.fromCount(count, Optional.empty(), required, true);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size), containerCluster0, application);
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size), containerCluster1, application);
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size), contentCluster0, application);
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size), contentCluster1, application);
// Since allHosts is a set, the total matching the sum also proves the cluster sets are disjoint
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
tester.nonRetired(allHosts).size());
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
/** Prepares a single cluster and returns its host specs as a set; empty when no nodes are requested. */
private Set<HostSpec> prepare(ApplicationId application, ProvisioningTester tester, ClusterSpec cluster, int nodeCount, int groups,
boolean required, NodeResources nodeResources) {
return nodeCount == 0
? Collections.emptySet()
: new HashSet<>(tester.prepare(application, cluster, nodeCount, groups, required, nodeResources));
}
/**
 * Holds the host specs returned by one prepare() round, both as one combined set and per
 * cluster, with assertion helpers for comparing successive deployments.
 * Note: removeHost mutates the combined set, so instances are not fully immutable.
 */
private static class SystemState {

private final Set<HostSpec> allHosts;
private final Set<HostSpec> container0;
private final Set<HostSpec> container1;
private final Set<HostSpec> content0;
private final Set<HostSpec> content1;

// Parameter names fixed: they were previously container1/container2 while being
// assigned to the container0/container1 fields, which was misleading.
public SystemState(Set<HostSpec> allHosts,
Set<HostSpec> container0,
Set<HostSpec> container1,
Set<HostSpec> content0,
Set<HostSpec> content1) {
this.allHosts = allHosts;
this.container0 = container0;
this.container1 = container1;
this.content0 = content0;
this.content1 = content1;
}

/** Returns a host by cluster name and index, or null if there is no host with the given values in this */
public HostSpec hostByMembership(String clusterId, int group, int index) {
for (HostSpec host : allHosts) {
if ( ! host.membership().isPresent()) continue;
ClusterMembership membership = host.membership().get();
if (membership.cluster().id().value().equals(clusterId) &&
groupMatches(membership.cluster().group(), group) &&
membership.index() == index)
return host;
}
return null;
}

/** Group 0 also matches hosts whose membership specifies no group */
private boolean groupMatches(Optional<ClusterSpec.Group> clusterGroup, int group) {
if ( ! clusterGroup.isPresent()) return group==0;
return clusterGroup.get().index() == group;
}

public Set<String> hostNames() {
return allHosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
}

/** Removes and returns the host with the given hostname from allHosts, or null if not present */
public HostSpec removeHost(String hostname) {
for (Iterator<HostSpec> i = allHosts.iterator(); i.hasNext();) {
HostSpec host = i.next();
if (host.hostname().equals(hostname)) {
i.remove();
return host;
}
}
return null;
}

/** Asserts that this state contains every host of 'other' with an unchanged membership */
public void assertExtends(SystemState other) {
assertTrue(this.allHosts.containsAll(other.allHosts));
assertExtends(this.container0, other.container0);
assertExtends(this.container1, other.container1);
assertExtends(this.content0, other.content0);
assertExtends(this.content1, other.content1);
}

private void assertExtends(Set<HostSpec> extension,
Set<HostSpec> original) {
for (HostSpec originalHost : original) {
// findHost returning null here will NPE, which is an acceptable failure in a test helper
HostSpec newHost = findHost(originalHost.hostname(), extension);
org.junit.Assert.assertEquals(newHost.membership(), originalHost.membership());
}
}

private HostSpec findHost(String hostName, Set<HostSpec> hosts) {
for (HostSpec host : hosts)
if (host.hostname().equals(hostName))
return host;
return null;
}

public void assertEquals(SystemState other) {
org.junit.Assert.assertEquals(this.allHosts, other.allHosts);
org.junit.Assert.assertEquals(this.container0, other.container0);
org.junit.Assert.assertEquals(this.container1, other.container1);
org.junit.Assert.assertEquals(this.content0, other.content0);
org.junit.Assert.assertEquals(this.content1, other.content1);
}

}
/** A predicate that returns whether a node has been retired by the given agent */
private static Predicate<Node> retiredBy(Agent agent) {
// history().event(Type.retired) already selects the retired event, so the previous
// extra filter on e.type() was a tautology and has been removed.
return (node) -> node.history().event(History.Event.Type.retired)
.filter(e -> e.agent() == agent)
.isPresent();
}
} |
```suggestion object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage() .orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString())); object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString()); ``` (Changing back to `orElseGet()` and setting `wantedVespaVersion` directly) | private void toSlime(Node node, boolean allFields, Cursor object) {
// Serializes a node to the given Slime object; when allFields is false only the url is written.
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", serializer.toString(node.state()));
object.setString("type", node.type().name());
object.setString("hostname", node.hostname());
// NOTE(review): "type" is written twice (enum name above, serializer form here) — the later write wins; confirm intended
object.setString("type", serializer.toString(node.type()));
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().getMinCpuCores());
toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
// Allocation-dependent fields
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
// orElseGet (not orElse) so the fallback image — a repository lookup plus tag construction —
// is only computed when the cluster does not specify a docker image
object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
.orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
// Orchestration (suspension) status, when known
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isDockerHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
}
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", serializer.toString(node.state()));
object.setString("type", node.type().name());
object.setString("hostname", node.hostname());
object.setString("type", serializer.toString(node.type()));
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().getMinCpuCores());
toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
.orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isDockerHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
} | class NodesResponse extends HttpResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
private final Slime slime;
private final NodeSerializer serializer = new NodeSerializer();
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
super(200);
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
filter = NodesApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
slime = new Slime();
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
}
@Override
public String getContentType() {
return "application/json";
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(serializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + serializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.getNodes(type, state), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.getNodes(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
object.setString("storageType", serializer.toString(resources.storageType()));
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().dockerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.dockerImage(n).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} | class NodesResponse extends HttpResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
private final Slime slime;
private final NodeSerializer serializer = new NodeSerializer();
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
super(200);
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
filter = NodesApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
slime = new Slime();
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
}
@Override
public String getContentType() {
return "application/json";
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(serializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + serializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.getNodes(type, state), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.getNodes(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
object.setString("storageType", serializer.toString(resources.storageType()));
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().dockerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.dockerImage(n).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} |
Minor: You could use the request() method that creates a builder here | private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
// Allocates container nodes when services.xml has no <nodes> tag:
// hosted -> share a content cluster's host, or allocate dedicated nodes; self-hosted -> single node.
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
if (!context.properties().useDedicatedNodesWhenUnspecified()) {
// Legacy behavior: piggyback on a content cluster's host when one exists
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleHostContainerCluster(cluster, singleContentHost.get(), context);
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
Capacity capacity = Capacity.fromCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
HostResource host = hostSystem.allocateHosts(clusterSpec, capacity, 1, log).keySet().iterator().next();
return singleHostContainerCluster(cluster, host, context);
}
}
// Dedicated nodes: use the builder-style request(), consistent with the branch above
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
// Two nodes for redundancy in production, one elsewhere
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
Capacity capacity = Capacity.fromCount(nodeCount,
Optional.empty(),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, 1, log);
return createNodesFromHosts(log, hosts, cluster);
}
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
if (!context.properties().useDedicatedNodesWhenUnspecified()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleHostContainerCluster(cluster, singleContentHost.get(), context);
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
Capacity capacity = Capacity.fromCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
HostResource host = hostSystem.allocateHosts(clusterSpec, capacity, 1, log).keySet().iterator().next();
return singleHostContainerCluster(cluster, host, context);
}
}
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
Capacity capacity = Capacity.fromCount(nodeCount,
Optional.empty(),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, 1, log);
return createNodesFromHosts(log, hosts, cluster);
}
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
static final String SEARCH_HANDLER_CLASS = com.yahoo.search.handler.SearchHandler.class.getName();
static final String SEARCH_HANDLER_BINDING = "http:
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
private void addBundlesForPlatformComponents(ApplicationContainerCluster cluster) {
for (Component<?, ?> component : cluster.getAllComponents()) {
String componentClass = component.model.bundleInstantiationSpec.getClassName();
BundleMapper.getBundlePath(componentClass).
ifPresent(cluster::addPlatformBundle);
}
}
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
}
private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
if (instance.isEmpty()) return false;
return instance.get().zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toUnmodifiableSet());
container.setProp("rotations", String.join(",", rotationsProperty));
}
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element clientSpec: XML.getChildren(spec, "client")) {
cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec));
}
}
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
}
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addAdditionalHostedConnector(deployState, cluster);
}
}
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer();
String serverName = server.getComponentId().getName();
String proxyProtocol = deployState.getProperties().proxyProtocol();
if (deployState.endpointCertificateSecrets().isPresent()) {
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
HostedSslConnectorFactory connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(proxyProtocol, serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
: HostedSslConnectorFactory.withProvidedCertificate(proxyProtocol, serverName, endpointCertificateSecrets);
server.addConnector(connectorFactory);
} else {
server.addConnector(HostedSslConnectorFactory.withDefaultCertificateAndTruststore(proxyProtocol, serverName));
}
}
private static boolean isHostedTenantApplication(ConfigModelContext context) {
var deployState = context.getDeployState();
boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication;
}
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if(cluster.getHttp() == null) {
Http http = deployState.getProperties().athenzDomain()
.map(tenantDomain -> createHostedImplicitHttpWithAccessControl(deployState, tenantDomain, cluster))
.orElseGet(() -> createHostedImplicitHttpWithoutAccessControl(cluster));
cluster.setHttp(http);
}
if(cluster.getHttp().getHttpServer() == null) {
JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
cluster.getHttp().setHttpServer(defaultHttpServer);
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", Defaults.getDefaults().vespaWebServicePort()));
}
}
private static Http createHostedImplicitHttpWithAccessControl(
DeployState deployState, AthenzDomain tenantDomain, ApplicationContainerCluster cluster) {
AccessControl accessControl =
new AccessControl.Builder(tenantDomain.value(), deployState.getDeployLogger())
.setHandlers(cluster)
.readEnabled(false)
.writeEnabled(false)
.build();
Http http = new Http(accessControl.getBindings(), accessControl);
FilterChains filterChains = new FilterChains(cluster);
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
http.setFilterChains(filterChains);
return http;
}
private static Http createHostedImplicitHttpWithoutAccessControl(ApplicationContainerCluster cluster) {
Http http = new Http(Collections.emptyList());
http.setFilterChains(new FilterChains(cluster));
return http;
}
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
cluster.addRestApi(
new RestApiBuilder().build(deployState, cluster, restApiElem));
}
}
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element servletElem : XML.getChildren(spec, "servlet"))
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder(cluster).build(deployState, cluster, component));
}
}
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if ( ! Version.fromString(version).equals(new Version(1))) {
throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
}
private void checkTagName(Element spec, DeployLogger logger) {
if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) {
logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
}
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder)
addStandaloneNode(cluster);
else
addNodesFromXml(cluster, spec, context);
}
private void addStandaloneNode(ApplicationContainerCluster cluster) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
cluster.addContainers(Collections.singleton(container));
}
static boolean incompatibleGCOptions(String jvmargs) {
Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC");
Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS");
return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find());
}
private static String buildJvmGCOptions(Zone zone, String jvmGCOPtions, boolean isHostedVespa) {
if (jvmGCOPtions != null) {
return jvmGCOPtions;
} else if ((zone.system() == SystemName.dev) || isHostedVespa) {
return null;
} else {
return ContainerCluster.G1GC;
}
}
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.G1GC);
}
}
return jvmOptions;
}
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
if (!cluster.getJvmGCOptions().isPresent()) {
String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = jvmElement.hasAttribute(VespaDomBuilder.GC_OPTIONS)
? jvmElement.getAttribute(VespaDomBuilder.GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
cluster.addContainers(allocateWithoutNodesTag(cluster, containerElement, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, jvmElement, context);
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (environmentVars != null && !environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
private static String getEnvironmentVariables(Element environmentVariables) {
StringBuilder sb = new StringBuilder();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
}
}
return sb.toString();
}
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
if (!cluster.serviceAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(",")));
});
}
if (!cluster.endpointAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(",")));
});
}
}
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
try {
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
}
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
node.setHostResource(host);
node.initService(context.getDeployLogger());
return List.of(node);
}
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepo(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
* This is used in case we are on hosted Vespa and no nodes tag is supplied:
* If there are content clusters this will pick the first host in the first cluster as the container node.
* If there are no content clusters this will return empty (such that the node can be created by the container here).
*/
private Optional<HostResource> getHostResourceFromContentClusters(ApplicationContainerCluster cluster, Element containersElement, ConfigModelContext context) {
Optional<Element> services = servicesRootOf(containersElement);
if ( ! services.isPresent())
return Optional.empty();
List<Element> contentServices = XML.getChildren(services.get(), "content");
if ( contentServices.isEmpty() ) return Optional.empty();
Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
NodesSpecification nodesSpec;
if (contentNodesElementOrNull == null)
nodesSpec = NodesSpecification.nonDedicated(1, context);
else
nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodesSpec,
contentServices.get(0).getAttribute("id"),
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
Node parent = element.getParentNode();
if (parent == null) return Optional.empty();
if ( ! (parent instanceof Element)) return Optional.empty();
Element parentElement = (Element)parent;
if (parentElement.getTagName().equals("services")) return Optional.of(parentElement);
return servicesRootOf(parentElement);
}
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
List<ApplicationContainer> nodes = new ArrayList<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
String id = "container." + entry.getValue().index();
ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
container.setHostResource(entry.getKey());
container.initService(deployLogger);
nodes.add(container);
}
return nodes;
}
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
List<ApplicationContainer> nodes = new ArrayList<>();
int nodeIndex = 0;
for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
nodeIndex++;
}
return nodes;
}
private static boolean useCpuSocketAffinity(Element nodesElement) {
if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
else
return false;
}
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
for (Container container: containers) {
if (container.getAssignedJvmOptions().isEmpty())
container.prependJvmOptions(jvmArgs);
}
}
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
for (Container container: containers)
container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.handler.SearchHandler");
String[] defaultBindings = {SEARCH_HANDLER_BINDING};
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
private void addGUIHandler(ApplicationContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
guiHandler.addServerBindings("http:
cluster.addComponent(guiHandler);
}
// Returns the explicit <binding> children if any exist; otherwise the supplied defaults.
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindings = XML.getChildren(searchElement, "binding");
    return bindings.isEmpty() ? defaultBindings : toBindingList(bindings);
}
/**
 * Extracts the trimmed, non-empty text content of each binding element.
 *
 * @param bindingElements the binding elements to read, in document order
 * @return the binding strings, blanks skipped
 */
private String[] toBindingList(List<Element> bindingElements) {
    List<String> result = new ArrayList<>();
    for (Element element : bindingElements) {
        String text = element.getTextContent().trim();
        if ( ! text.isEmpty())
            result.add(text);
    }
    // Zero-length array argument is the idiomatic (and JIT-friendly) toArray form.
    return result.toArray(new String[0]);
}
// Builds the document API model from the <document-api> element; returns null when the element is absent.
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
// Builds the document-processing model from <document-processing>; returns null when absent.
// <include> directives inside the element are expanded first. The final constructor flag
// disables something for standalone builds — presumably mbus wiring; confirm in ContainerDocproc.
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
// Expands every <include> child of the given element in place.
// Requires an application package (app) to resolve the included directories; fails loudly otherwise.
private void addIncludes(Element parentElement) {
List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
if (includes == null || includes.isEmpty()) {
return;
}
if (app == null) {
throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
}
for (Element include : includes) {
addInclude(parentElement, include);
}
}
// Expands a single <include dir="..."> directive: for every XML file in the directory, imports
// each of its child elements into parentElement's document and appends them to parentElement.
private void addInclude(Element parentElement, Element include) {
String dirName = include.getAttribute(IncludeDirs.DIR);
app.validateIncludeDir(dirName);
List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
for (Element includedFile : includedFiles) {
List<Element> includedSubElements = XML.getChildren(includedFile);
for (Element includedSubElement : includedSubElements) {
// importNode(deep=true) is required because the included nodes belong to another Document.
Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
parentElement.appendChild(copiedNode);
}
}
}
// Adds a component to the cluster for each child of spec with the given element name.
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
Element spec, String componentName) {
for (Element node : XML.getChildren(spec, componentName)) {
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
// Like addConfiguredComponents, but runs the given validator on each element first
// (the validator throws to reject the element).
private static void validateAndAddConfiguredComponents(DeployState deployState,
ContainerCluster<? extends Container> cluster,
Element spec, String componentName,
Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
// Adds an Athenz IdentityProvider component when the deployment spec declares an Athenz domain.
// The service is resolved per instance/zone, falling back to the spec-wide service; a missing
// service configuration is an error. Also exposes identity.domain/identity.service as container props.
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"))
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
// Returns the explicit load balancer name if set, otherwise falls back to the first
// config server's hostname ("unknown" when there are no config servers).
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (xmlRendererId.equals(id) || jsonRendererId.equals(id))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a container tag (current "container" or deprecated "jdisc"). */
public static boolean isContainerTag(Element element) {
return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName());
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Default status file served by the hosted-Vespa load-balancer status handler.
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Environment variable that may override HOSTED_VESPA_STATUS_FILE.
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
static final String SEARCH_HANDLER_CLASS = com.yahoo.search.handler.SearchHandler.class.getName();
// NOTE(review): this binding literal appears truncated ("http:") in this copy — confirm upstream.
static final String SEARCH_HANDLER_BINDING = "http:
public enum Networking { disable, enable }
// Application package; set in doBuild, so null until a build has started.
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// The XML tags (container, jdisc) this builder handles.
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
// Reserved renderer component ids; user-defined renderers must not reuse them.
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
// Creates a builder. Standalone builds disable message bus / rpc; networking controls
// whether the http server is enabled.
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the model ids (container and the deprecated jdisc tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
// Main entry point: validates the spec, builds the container cluster with all its content,
// and attaches it to the model. Also records the deploy logger and application package for
// use by the helper methods below.
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
// Registers the platform bundle (if any) providing each component's implementation class.
private void addBundlesForPlatformComponents(ApplicationContainerCluster cluster) {
for (Component<?, ?> component : cluster.getAllComponents()) {
String componentClass = component.model.bundleInstantiationSpec.getClassName();
BundleMapper.getBundlePath(componentClass).
ifPresent(cluster::addPlatformBundle);
}
}
// Builds the cluster config producer for this container spec, using the model context's
// producer id as both the cluster's subId and name.
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
// Populates the cluster from the XML spec: components, APIs, chains, handlers, http config,
// logging, nodes, and hosted-Vespa identity. The call order is significant (e.g. handlers are
// registered before http/access-control is configured, and nodes are added late) — preserve it.
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
}
// Configures the cluster's secret store from the <secret-store> element (one group per
// <group name=... environment=...> child); no-op when the element is absent.
private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;
    SecretStore secretStore = new SecretStore();
    XML.getChildren(secretStoreElement, "group")
       .forEach(group -> secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")));
    cluster.setSecretStore(secretStore);
}
// Hosted-only: when the application package has a deployment spec, wires up the Athenz
// identity provider and rotation properties for the cluster.
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
// Sets the rotation names and the activeRotation flag as service properties on every container.
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
// True when the deployment spec declares this zone, for this instance, with an active rotation.
// False when this application instance is absent from the spec.
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.instance(app.getApplicationId().instance())
               .map(instance -> instance.zones().stream()
                                        .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
                                                                  declaredZone.active()))
               .orElse(false);
}
// Sets the "rotations" property to the comma-joined endpoint names belonging to this cluster.
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toUnmodifiableSet());
container.setProp("rotations", String.join(",", rotationsProperty));
}
// Collects service/endpoint aliases from the <aliases> element; prod environments only.
// Note: aliases may be null here — presumably XML.getChildren tolerates a null parent; confirm.
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
// Adds <component> children found directly under the spec and under each <components> group
// (with <include> directives in the groups expanded first).
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
// Hosted Vespa serves a load-balancer status file (path overridable via VESPA_LB_STATUS_FILE);
// self-hosted gets the standard VIP handler.
// NOTE(review): the binding literal below appears truncated ("http:") in this copy — confirm upstream.
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
// Adds a client provider component for each <client> child of the spec.
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element clientSpec: XML.getChildren(spec, "client")) {
cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec));
}
}
// Adds a component for each <server> child of the spec.
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
// Adds the access logs declared in the spec; when none are declared and access logging is
// enabled by default for this deployment, falls back to the default search access log.
private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
}
// Returns all <accesslog> children of the spec.
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
// Applies the user's <http> config if present; hosted tenant applications additionally get an
// implicit http setup (when missing) and the extra hosted TLS connector.
private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addAdditionalHostedConnector(deployState, cluster);
}
}
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer();
String serverName = server.getComponentId().getName();
String proxyProtocol = deployState.getProperties().proxyProtocol();
if (deployState.endpointCertificateSecrets().isPresent()) {
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
HostedSslConnectorFactory connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(proxyProtocol, serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
: HostedSslConnectorFactory.withProvidedCertificate(proxyProtocol, serverName, endpointCertificateSecrets);
server.addConnector(connectorFactory);
} else {
server.addConnector(HostedSslConnectorFactory.withDefaultCertificateAndTruststore(proxyProtocol, serverName));
}
}
// True for hosted, default-type, non-tester applications — the ones that get implicit http setup.
private static boolean isHostedTenantApplication(ConfigModelContext context) {
var deployState = context.getDeployState();
boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication;
}
// Ensures the cluster has an http config and an http server. When the tenant has an Athenz
// domain, the implicit http gets access control; otherwise it is open. A default Jetty server
// on the standard Vespa web service port is created if none exists.
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if(cluster.getHttp() == null) {
Http http = deployState.getProperties().athenzDomain()
.map(tenantDomain -> createHostedImplicitHttpWithAccessControl(deployState, tenantDomain, cluster))
.orElseGet(() -> createHostedImplicitHttpWithoutAccessControl(cluster));
cluster.setHttp(http);
}
if(cluster.getHttp().getHttpServer() == null) {
JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
cluster.getHttp().setHttpServer(defaultHttpServer);
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", Defaults.getDefaults().vespaWebServicePort()));
}
}
// Builds the implicit http config with Athenz access control for the tenant domain.
// Read/write access is disabled by default; an empty access-control filter chain is registered.
private static Http createHostedImplicitHttpWithAccessControl(
DeployState deployState, AthenzDomain tenantDomain, ApplicationContainerCluster cluster) {
AccessControl accessControl =
new AccessControl.Builder(tenantDomain.value(), deployState.getDeployLogger())
.setHandlers(cluster)
.readEnabled(false)
.writeEnabled(false)
.build();
Http http = new Http(accessControl.getBindings(), accessControl);
FilterChains filterChains = new FilterChains(cluster);
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
http.setFilterChains(filterChains);
return http;
}
// Builds an implicit http config with no bindings and no access control — just empty filter chains.
private static Http createHostedImplicitHttpWithoutAccessControl(ApplicationContainerCluster cluster) {
    Http http = new Http(List.of());
    http.setFilterChains(new FilterChains(cluster));
    return http;
}
// Builds the http config from the <http> element; with networking disabled all servers are removed.
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
// Adds a rest API for each <rest-api> child of the spec.
private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
cluster.addRestApi(
new RestApiBuilder().build(deployState, cluster, restApiElem));
}
}
// Adds a servlet for each <servlet> child of the spec.
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element servletElem : XML.getChildren(spec, "servlet"))
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
// Attaches the document API to the cluster when <document-api> is declared; no-op otherwise.
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
// Attaches document processing when <document-processing> is declared, and propagates its
// tuning options to the cluster's message bus parameters.
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
// Configures search from the <search> element: chains, the search handler, the GUI handler,
// and any user-declared renderers (validated against reserved renderer ids).
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Enables stateless model evaluation when <model-evaluation> is declared, using the rank
// profiles from the Vespa model when available.
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
// Configures processing chains from the <processing> element, including user renderers
// (validated against reserved renderer ids).
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Builds the container search model: chains from XML plus query profiles, semantic rules and
// page templates from the application package.
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
// Validates and loads the page templates from the application package into the search config.
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
// Adds a component for each user-declared <handler> child of the spec.
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder(cluster).build(deployState, cluster, component));
}
}
// Rejects any container services version other than 1.0.
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
// Warns when the deprecated <jdisc> tag is used instead of <container>.
private void checkTagName(Element spec, DeployLogger logger) {
if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) {
logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
}
// Standalone builds get a single implicit node; otherwise nodes come from the XML spec.
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if ( ! standaloneBuilder)
        addNodesFromXml(cluster, spec, context);
    else
        addStandaloneNode(cluster);
}
// Adds the single implicit "standalone" container node used by standalone builds.
private void addStandaloneNode(ApplicationContainerCluster cluster) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
cluster.addContainers(Collections.singleton(container));
}
// True when the jvm args explicitly select a collector (-XX:±Use...GC) or carry CMS tuning
// flags (-XX:±CMS...) — both clash with the GC options Vespa manages itself.
static boolean incompatibleGCOptions(String jvmargs) {
    boolean selectsCollector = Pattern.compile("-XX:[-+]Use.+GC").matcher(jvmargs).find();
    boolean tunesCms = Pattern.compile("-XX:[-+]*CMS").matcher(jvmargs).find();
    return selectsCollector || tunesCms;
}
// Returns the GC options to use: explicitly requested options win; dev systems and hosted
// Vespa fall back to the JVM default (null); everything else gets G1.
private static String buildJvmGCOptions(Zone zone, String requestedGcOptions, boolean isHostedVespa) {
    if (requestedGcOptions != null) return requestedGcOptions;
    boolean useJvmDefault = (zone.system() == SystemName.dev) || isHostedVespa;
    return useJvmDefault ? null : ContainerCluster.G1GC;
}
// Reads jvm options from the <nodes> tag. Specifying both jvm-options and the deprecated
// jvmargs is an error; legacy jvmargs containing GC flags trigger a warning and force G1
// on the cluster (the GC flags themselves are still returned as-is).
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.G1GC);
}
}
return jvmOptions;
}
// Applies jvm settings given as attributes directly on the legacy <nodes> tag: args, GC
// options (unless already set on the cluster) and the allocated-memory percentage.
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
if (!cluster.getJvmGCOptions().isPresent()) {
String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
// Applies jvm settings from the dedicated <jvm> element: options, allocated-memory and GC options.
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = jvmElement.hasAttribute(VespaDomBuilder.GC_OPTIONS)
? jvmElement.getAttribute(VespaDomBuilder.GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
// Creates the cluster's nodes from the spec. Without a <nodes> tag, allocation is implicit;
// with one, nodes are created per its attributes, jvm settings are read from either the
// <jvm> child or the legacy attributes, and preload/env-vars/cpu-affinity are applied.
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
cluster.addContainers(allocateWithoutNodesTag(cluster, containerElement, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, jvmElement, context);
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (environmentVars != null && !environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
// Serializes the children of <environment-variables> as space-separated "NAME=value " pairs
// (note the trailing space per entry). Never returns null; returns "" for a null element.
private static String getEnvironmentVariables(Element environmentVariables) {
StringBuilder sb = new StringBuilder();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
}
}
return sb.toString();
}
// Dispatches node creation on the <nodes> tag's attributes: type= (node type), of= (content
// cluster reference), count= (node count), hosted manual environments (implicit count),
// or an explicit <node> list as the fallback.
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Propagates the cluster's service and endpoint aliases to every container as comma-joined
 * service properties. Each property is only set when the corresponding alias list is non-empty.
 */
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        // Join once instead of per container (was recomputed inside the loop).
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Parses an allocated-memory value such as "60%" and applies it to the cluster.
 * Null/empty input is silently ignored.
 *
 * @throws IllegalArgumentException if the value is not an integer percentage ending with '%'
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw invalidMemoryPercentage(cluster);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Keep the parse failure as the cause instead of silently dropping it.
        throw (IllegalArgumentException) invalidMemoryPercentage(cluster).initCause(e);
    }
}

/** Builds the shared error for malformed memory percentage values (previously duplicated inline). */
private static IllegalArgumentException invalidMemoryPercentage(ApplicationContainerCluster cluster) {
    return new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                        " must be an integer percentage ending by the '%' sign");
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
// A single container ("container.0") pinned to the given host.
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
node.setHostResource(host);
node.initService(context.getDeployLogger());
return List.of(node);
}
// Provisions hosts according to the <nodes count=...> specification and wraps them in containers.
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
// Allocates all hosts of the given node type (<nodes type=...>) to this container cluster.
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepo(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
// Co-locates this container cluster on the hosts of the content cluster referenced by
// <nodes of="...">. An invalid reference fails with context about the cluster.
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ApplicationContainerCluster cluster, Element containersElement, ConfigModelContext context) {
Optional<Element> services = servicesRootOf(containersElement);
if ( ! services.isPresent())
return Optional.empty();
List<Element> contentServices = XML.getChildren(services.get(), "content");
if ( contentServices.isEmpty() ) return Optional.empty();
Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
NodesSpecification nodesSpec;
// A content cluster without a <nodes> tag implies a single non-dedicated node.
if (contentNodesElementOrNull == null)
nodesSpec = NodesSpecification.nonDedicated(1, context);
else
nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodesSpec,
contentServices.get(0).getAttribute("id"),
cluster.getRoot().hostSystem(),
context.getDeployLogger());
// Any host of the provisioned set will do; take the first one returned.
return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
    // Walk up the DOM until we hit <services> or run out of element ancestors.
    for (Node parent = element.getParentNode(); parent instanceof Element; parent = parent.getParentNode()) {
        Element parentElement = (Element) parent;
        if ("services".equals(parentElement.getTagName())) return Optional.of(parentElement);
    }
    return Optional.empty();
}
/** Creates one initialized ApplicationContainer per allocated host, bound to that host. */
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
    List<ApplicationContainer> containers = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer container = new ApplicationContainer(cluster,
                                                                  "container." + membership.index(),
                                                                  membership.retired(),
                                                                  membership.index(),
                                                                  cluster.isHostedVespa());
        container.setHostResource(host);
        container.initService(deployLogger);
        containers.add(container);
    });
    return containers;
}
/** Builds one container per explicit &lt;node&gt; child, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        ContainerServiceBuilder builder = new ContainerServiceBuilder("container." + index, index);
        nodes.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/** Returns the cpu socket affinity attribute of the nodes element, or false when it is absent. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the nodes-tag JVM args to every container that has no JVM options of its own. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Copies the preload attribute of the nodes element onto every container, when present. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    containers.forEach(container -> container.setPreLoad(preload));
}
/** Adds the search handler (and the ExecutionFactory it needs), honoring explicit <binding> elements. */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
// The ExecutionFactory component is required by the SearchHandler added below.
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.handler.SearchHandler");
String[] defaultBindings = {SEARCH_HANDLER_BINDING};
// Explicit <binding> children replace the default binding entirely.
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
/** Registers the query builder GUI handler on this cluster. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
// NOTE(review): the binding string below is truncated in this copy of the source — restore the
// full binding from the upstream file before compiling.
guiHandler.addServerBindings("http:
cluster.addComponent(guiHandler);
}
/** Returns the explicit <binding> values under the given element, or the defaults when none are given. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? defaultBindings : toBindingList(bindingElements);
}
/** Returns the non-empty, trimmed text contents of the given binding elements, in document order. */
private String[] toBindingList(List<Element> bindingElements) {
    // Stream pipeline replaces the manual accumulate-then-toArray loop;
    // toArray(String[]::new) avoids the presized-array idiom.
    return bindingElements.stream()
                          .map(element -> element.getTextContent().trim())
                          .filter(text -> ! text.isEmpty())
                          .toArray(String[]::new);
}
/** Builds the document-api model from the <document-api> element, or returns null when absent. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/** Builds the document-processing model from <document-processing>, or returns null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement);
    // The last argument disables the message bus dependent parts for the standalone builder.
    return new ContainerDocproc(cluster, chains, options, ! standaloneBuilder);
}
/** Expands every <include> directive directly below the given element. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports the child elements of every XML file in the include directory into parentElement. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element child : XML.getChildren(includedFile)) {
            // importNode is required to re-parent nodes across DOM documents.
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
        }
    }
}
/** Adds a component to the cluster for every child of spec with the given tag name. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    for (Element componentElement : XML.getChildren(spec, componentName))
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
}
/** Like addConfiguredComponents, but runs the given validator on each element before adding it. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // expected to throw on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds the Athenz identity provider component to the cluster when the deployment spec declares
 * an Athenz domain, and stamps identity.domain/identity.service properties on each container.
 * Throws when a domain is declared without a resolvable Athenz service for this instance/zone.
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
// Zone/region specific service wins over the spec-wide default service.
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's hostname. */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (xmlRendererId.equals(id) || jsonRendererId.equals(id))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a container tag (or the deprecated jdisc alias). */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName) || DEPRECATED_CONTAINER_TAG.equals(tagName);
}
} |
" ... AND feature flag is true in all zones"? | private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
if (!context.properties().useDedicatedNodesWhenUnspecified()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleHostContainerCluster(cluster, singleContentHost.get(), context);
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
Capacity capacity = Capacity.fromCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
HostResource host = hostSystem.allocateHosts(clusterSpec, capacity, 1, log).keySet().iterator().next();
return singleHostContainerCluster(cluster, host, context);
}
}
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
deployState.getWantedNodeVespaVersion(),
false,
Optional.empty(),
deployState.getWantedDockerImageRepo());
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
Capacity capacity = Capacity.fromCount(nodeCount,
Optional.empty(),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, 1, log);
return createNodesFromHosts(log, hosts, cluster);
}
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
} | private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
// Allocates containers when no nodes tag is given (see the overload-free variant above for the
// same flow): hosted without dedicated nodes piggybacks on a content cluster host or allocates
// a single node; hosted with dedicated nodes allocates 2 nodes in prod and 1 elsewhere;
// non-hosted runs a single-node cluster on the default host.
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
if (!context.properties().useDedicatedNodesWhenUnspecified()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleHostContainerCluster(cluster, singleContentHost.get(), context);
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
// Capacity of exactly one node; allocation may fail after bootstrap.
Capacity capacity = Capacity.fromCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
HostResource host = hostSystem.allocateHosts(clusterSpec, capacity, 1, log).keySet().iterator().next();
return singleHostContainerCluster(cluster, host, context);
}
}
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepo(deployState.getWantedDockerImageRepo())
.build();
// Two nodes for redundancy in production, one otherwise.
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
Capacity capacity = Capacity.fromCount(nodeCount,
Optional.empty(),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, 1, log);
return createNodesFromHosts(log, hosts, cluster);
}
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Status file served by the hosted Vespa load balancer status handler.
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Environment variable that may override HOSTED_VESPA_STATUS_FILE.
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
static final String SEARCH_HANDLER_CLASS = com.yahoo.search.handler.SearchHandler.class.getName();
// NOTE(review): this binding string is truncated in this copy of the source — restore from upstream.
static final String SEARCH_HANDLER_BINDING = "http:
public enum Networking { disable, enable }
private ApplicationPackage app;
// True when built by the standalone container; false in a config model context.
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// This builder handles both the current and the deprecated container tag names.
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
// Renderer ids reserved for internal use (see validateRendererElement).
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a container model builder.
 * RPC/message bus is enabled except for the standalone builder; the http server follows the
 * networking mode.
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.networking = networking;
    this.standaloneBuilder = standaloneBuilder;
    this.httpServerEnabled = (networking == Networking.enable);
    this.rpcServerEnabled = ! standaloneBuilder;
}
@Override
public List<ConfigModelId> handlesElements() {
// Handles both <container> and the deprecated <jdisc> tag.
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
// Keep references used by the helper methods during this build.
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
// rpc/message bus are off for the standalone builder; http follows the networking mode.
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
/** Ensures the platform bundle of every configured component is installed on the cluster. */
private void addBundlesForPlatformComponents(ApplicationContainerCluster cluster) {
    for (Component<?, ?> component : cluster.getAllComponents()) {
        String className = component.model.bundleInstantiationSpec.getClassName();
        BundleMapper.getBundlePath(className).ifPresent(cluster::addPlatformBundle);
    }
}
/** Builds the cluster object itself from the container element, via the standard dom producer builder. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
// The producer id is used both as subId and name of the cluster.
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the container element. The call order below matters: e.g. search
 * must be set before the search handler is added, handlers before http (access control wraps
 * them), and nodes after the features that configure per-node properties are known.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
// Status handlers differ between hosted and self-hosted.
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
}
/** Configures the cluster's secret store from the <secret-store> element, when present. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * On hosted Vespa, wires up the Athenz identity provider and rotation properties from the
 * application's deployment.xml, when one is present.
 */
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
/** Sets the rotation related properties on every container of the cluster. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
    // Same for all containers, so computed once.
    String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec));
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", activeRotation);
    }
}
/** Returns whether the deployment spec declares this zone as active for the current instance. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.instance(app.getApplicationId().instance())
               .map(instance -> instance.zones().stream()
                                        .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
                                                                  declaredZone.active()))
               .orElse(false);
}
/** Sets the 'rotations' property to the comma separated endpoint names of this cluster. */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> names = endpoints.stream()
                                 .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
                                 .flatMap(endpoint -> endpoint.names().stream())
                                 .collect(Collectors.toUnmodifiableSet());
    container.setProp("rotations", String.join(",", names));
}
/** Collects service and endpoint aliases from the <aliases> element; only applies in prod. */
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
    if (environment != Environment.prod) return;

    Element aliases = XML.getChild(spec, "aliases"); // may be null; XML.getChildren handles that below
    XML.getChildren(aliases, "service-alias").forEach(alias -> cluster.serviceAliases().add(XML.getValue(alias)));
    XML.getChildren(aliases, "endpoint-alias").forEach(alias -> cluster.endpointAliases().add(XML.getValue(alias)));
}
/** Adds <component> children found directly under spec and inside any <components> group. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element componentsGroup : XML.getChildren(spec, "components")) {
        addIncludes(componentsGroup);
        addConfiguredComponents(deployState, cluster, componentsGroup, "component");
    }
    addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds the status handler: on hosted Vespa a file-backed handler (file overridable through the
 * VESPA_LB_STATUS_FILE environment variable), otherwise the standard VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
// NOTE(review): the binding string argument below is truncated in this copy of the source.
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
/** Adds a client provider component for every <client> element. */
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    XML.getChildren(spec, "client")
       .forEach(clientElement -> cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientElement)));
}
/** Adds a component for every <server> element directly under spec. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/** Configures access logging: explicit <accesslog> elements, or the default search access log. */
private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    accessLogElements.forEach(accessLog ->
            AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent));
    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
        cluster.addDefaultSearchAccessLog();
}
/** Returns all <accesslog> children of the given element. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/** Builds the http model from the <http> element and adds hosted-only defaults and connectors. */
private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null)
        cluster.setHttp(buildHttp(deployState, cluster, httpElement));
    if (isHostedTenantApplication(context)) {
        addHostedImplicitHttpIfNotPresent(deployState, cluster);
        addAdditionalHostedConnector(deployState, cluster);
    }
}
/**
 * Adds the TLS connector used by hosted Vespa: with provided endpoint certificates when
 * available (requiring a client CA in public systems), otherwise the default cert/truststore.
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer();
String serverName = server.getComponentId().getName();
String proxyProtocol = deployState.getProperties().proxyProtocol();
if (deployState.endpointCertificateSecrets().isPresent()) {
// Public systems mutually authenticate the client via the application-supplied CA.
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
// NOTE(review): the message/URL literal below is truncated in this copy of the source.
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
HostedSslConnectorFactory connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(proxyProtocol, serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
: HostedSslConnectorFactory.withProvidedCertificate(proxyProtocol, serverName, endpointCertificateSecrets);
server.addConnector(connectorFactory);
} else {
server.addConnector(HostedSslConnectorFactory.withDefaultCertificateAndTruststore(proxyProtocol, serverName));
}
}
/** True for ordinary tenant applications on hosted Vespa (not tester instances, not internal application types). */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    var deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return false;
    if (context.getApplicationType() != ApplicationType.DEFAULT) return false;
    return ! deployState.getProperties().applicationId().instance().isTester();
}
/** Ensures the cluster has an http model and an http server, creating hosted defaults when missing. */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null) {
        // With an Athenz domain we also set up access control filters; otherwise plain http.
        Http http = deployState.getProperties().athenzDomain()
                .map(tenantDomain -> createHostedImplicitHttpWithAccessControl(deployState, tenantDomain, cluster))
                .orElseGet(() -> createHostedImplicitHttpWithoutAccessControl(cluster));
        cluster.setHttp(http);
    }
    if (cluster.getHttp().getHttpServer() == null) {
        JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
        cluster.getHttp().setHttpServer(defaultHttpServer);
        defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", Defaults.getDefaults().vespaWebServicePort()));
    }
}
/**
 * Creates the implicit hosted http model with Athenz access control: handler bindings come from
 * the access control setup, and an empty access-control filter chain is installed.
 */
private static Http createHostedImplicitHttpWithAccessControl(
DeployState deployState, AthenzDomain tenantDomain, ApplicationContainerCluster cluster) {
AccessControl accessControl =
new AccessControl.Builder(tenantDomain.value(), deployState.getDeployLogger())
.setHandlers(cluster)
// Read/write are disabled by default here; the application must opt in.
.readEnabled(false)
.writeEnabled(false)
.build();
Http http = new Http(accessControl.getBindings(), accessControl);
FilterChains filterChains = new FilterChains(cluster);
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
http.setFilterChains(filterChains);
return http;
}
/** Creates the implicit hosted http model without access control: no bindings, empty filter chains. */
private static Http createHostedImplicitHttpWithoutAccessControl(ApplicationContainerCluster cluster) {
    Http http = new Http(Collections.emptyList());
    FilterChains emptyChains = new FilterChains(cluster);
    http.setFilterChains(emptyChains);
    return http;
}
/** Builds the http model from <http>; strips all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Adds a rest-api model for every <rest-api> element. */
private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    for (Element restApiElement : XML.getChildren(spec, "rest-api"))
        cluster.addRestApi(new RestApiBuilder().build(deployState, cluster, restApiElement));
}
/** Adds a servlet model for every <servlet> element. */
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    XML.getChildren(spec, "servlet")
       .forEach(servletElement -> cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElement)));
}
/** Sets the document-api model on the cluster when <document-api> is configured. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Configures document processing when <document-processing> is present, including message bus tuning. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
// The docproc options also drive the cluster's message bus parameters.
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Configures search: chains, the search and GUI handlers, and custom renderers. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;

    addIncludes(searchElement);
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));
    addSearchHandler(cluster, searchElement);
    addGUIHandler(cluster);
    // Renderer ids reserved for internal use are rejected here.
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Enables stateless model evaluation when <model-evaluation> is configured. */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    if (XML.getChild(spec, "model-evaluation") == null) return;
    RankProfileList profiles = (context.vespaModel() == null)
            ? RankProfileList.empty
            : context.vespaModel().rankProfileList();
    cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Configures processing chains from <processing>, including renderers, when present. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
// Renderer ids reserved for internal use are rejected here.
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the search model: chains plus query profiles, semantic rules and page templates. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
/** Validates and installs the page templates found in the application package. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds a handler component for every user defined <handler> element. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement));
}
/** Throws unless the container element declares version 1.x. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if ( ! new Version(1).equals(Version.fromString(version)))
        throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
/** Warns when the deprecated jdisc tag name is used instead of container. */
private void checkTagName(Element spec, DeployLogger logger) {
    if (DEPRECATED_CONTAINER_TAG.equals(spec.getTagName()))
        logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
/** Adds nodes to the cluster: the single standalone node, or nodes derived from the XML spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster);
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds the single implicit container used when running standalone. */
private void addStandaloneNode(ApplicationContainerCluster cluster) {
    int index = cluster.getContainers().size();
    ApplicationContainer container = new ApplicationContainer(cluster, "standalone", index, cluster.isHostedVespa());
    cluster.addContainers(Collections.singleton(container));
}
/**
 * Returns whether the given jvmargs string contains GC related flags that must instead be
 * given through 'jvm-gc-options': an explicit GC algorithm selection or any CMS tuning flag.
 *
 * @param jvmargs the raw jvmargs attribute value, never null
 */
static boolean incompatibleGCOptions(String jvmargs) {
    // Compiled per call; acceptable since this runs a handful of times per deployment.
    Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC");
    Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS");
    return gcAlgorithm.matcher(jvmargs).find() || cmsArgs.matcher(jvmargs).find();
}
/**
 * Decides the JVM GC options for a cluster: explicitly configured options win; in dev systems
 * and on hosted Vespa null is returned (let the JVM default apply); otherwise G1 is used.
 * (Parameter renamed from the typo 'jvmGCOPtions' — a private method, so no caller impact.)
 */
private static String buildJvmGCOptions(Zone zone, String jvmGCOptions, boolean isHostedVespa) {
    if (jvmGCOptions != null) {
        return jvmGCOptions;
    } else if ((zone.system() == SystemName.dev) || isHostedVespa) {
        return null;
    } else {
        return ContainerCluster.G1GC;
    }
}
/**
 * Reads JVM options from the legacy attributes of the nodes element: 'jvm-options' wins, and
 * combining it with the deprecated 'jvmargs' is an error. GC flags found inside 'jvmargs'
 * force G1 on the cluster and produce a warning.
 */
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
    if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
        String jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
        if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
            String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
            throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
                                               " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
        }
        return jvmOptions;
    }
    String jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
    if (incompatibleGCOptions(jvmOptions)) {
        deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
        cluster.setJvmGCOptions(ContainerCluster.G1GC);
    }
    return jvmOptions;
}
/**
 * Applies JVM settings from the legacy attributes directly on the nodes element
 * (used when no <jvm> child tag is present): args, GC options and allocated memory share.
 */
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
// getJvmOptions above may already have forced G1; only derive GC options when still unset.
if (!cluster.getJvmGCOptions().isPresent()) {
String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
/** Applies settings from the <jvm> tag: options, allocated memory share, and GC options. */
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                   Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String gcOptions = jvmElement.hasAttribute(VespaDomBuilder.GC_OPTIONS)
            ? jvmElement.getAttribute(VespaDomBuilder.GC_OPTIONS)
            : null;
    cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), gcOptions, context.getDeployState().isHosted()));
}
/**
 * Adds containers based on the <nodes> element, or allocates implicitly when it is absent.
 * Also applies JVM settings (from <jvm> or legacy attributes), preload, environment variables
 * and CPU socket affinity to the created nodes.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
cluster.addContainers(allocateWithoutNodesTag(cluster, containerElement, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);
// A <jvm> child supersedes the legacy JVM attributes on <nodes>.
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, jvmElement, context);
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (environmentVars != null && !environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Serializes the children of an environment-variables element to a space-separated
 * "NAME=value " string (with trailing space), or "" when the element is null or empty.
 */
private static String getEnvironmentVariables(Element environmentVariables) {
if (environmentVariables == null) return "";
StringBuilder result = new StringBuilder();
for (Element variable : XML.getChildren(environmentVariables)) {
result.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
}
return result.toString();
}
/**
 * Creates the containers of this cluster, dispatching on which attribute the nodes element carries:
 * type (host-type allocation), of (reference to a content cluster's nodes), count (node count),
 * hosted manual deployment (implicit count), or an explicit node list.
 */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Propagates the cluster's service and endpoint aliases to every container as
 * comma-separated service properties ("servicealiases"/"endpointaliases").
 */
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
if (!cluster.serviceAliases().isEmpty()) {
// Join once per cluster instead of once per container
String serviceAliases = String.join(",", cluster.serviceAliases());
result.forEach(container -> container.setProp("servicealiases", serviceAliases));
}
if (!cluster.endpointAliases().isEmpty()) {
String endpointAliases = String.join(",", cluster.endpointAliases());
result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
}
}
/**
 * Parses an allocated-memory attribute of the form "NN%" and applies it to the cluster.
 * Does nothing when the attribute is null or empty.
 *
 * @throws IllegalArgumentException if the value is not an integer percentage ending with '%'
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException(invalidMemoryPercentageMessage(cluster));
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
try {
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
}
catch (NumberFormatException e) {
// Keep the parse failure as the cause instead of discarding it
throw new IllegalArgumentException(invalidMemoryPercentageMessage(cluster), e);
}
}

/** The error message shared by both malformed-percentage failure modes (previously duplicated inline). */
private static String invalidMemoryPercentageMessage(ApplicationContainerCluster cluster) {
return "The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign";
}
/** Creates the single implicit container ("container.0") for a cluster declared without a nodes tag. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
container.setHostResource(host);
container.initService(context.getDeployLogger());
return List.of(container);
}
/** Provisions a counted number of hosts from the node repository and creates a container per host. */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/** Allocates all hosts of the given node type (e.g. for infrastructure clusters) and creates a container per host. */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepo(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * Creates containers co-located on the hosts of the content cluster referenced by the "of" attribute.
 * The referenced cluster id is also recorded on this cluster as its host cluster.
 */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
// Re-throw with the cluster in the message so the user can locate the bad reference
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ApplicationContainerCluster cluster, Element containersElement, ConfigModelContext context) {
Optional<Element> services = servicesRootOf(containersElement);
if ( ! services.isPresent())
return Optional.empty();
List<Element> contentServices = XML.getChildren(services.get(), "content");
if ( contentServices.isEmpty() ) return Optional.empty();
Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
NodesSpecification nodesSpec;
// A content cluster without a nodes tag implicitly has one non-dedicated node
if (contentNodesElementOrNull == null)
nodesSpec = NodesSpecification.nonDedicated(1, context);
else
nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodesSpec,
contentServices.get(0).getAttribute("id"),
cluster.getRoot().hostSystem(),
context.getDeployLogger());
// Any host of the first content cluster will do; iteration order decides which
return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
for (Node ancestor = element.getParentNode(); ancestor instanceof Element; ancestor = ancestor.getParentNode()) {
Element ancestorElement = (Element) ancestor;
if (ancestorElement.getTagName().equals("services")) return Optional.of(ancestorElement);
}
return Optional.empty();
}
/** Wraps each provisioned host in a container named "container.&lt;index&gt;" and initializes its service. */
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
List<ApplicationContainer> containers = new ArrayList<>();
hosts.forEach((host, membership) -> {
ApplicationContainer container = new ApplicationContainer(cluster, "container." + membership.index(),
membership.retired(), membership.index(), cluster.isHostedVespa());
container.setHostResource(host);
container.initService(deployLogger);
containers.add(container);
});
return containers;
}
/** Creates one container per explicit &lt;node&gt; child, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
List<ApplicationContainer> nodes = new ArrayList<>();
List<Element> nodeElements = XML.getChildren(nodesElement, "node");
for (int index = 0; index < nodeElements.size(); index++) {
nodes.add(new ContainerServiceBuilder("container." + index, index).build(deployState, cluster, nodeElements.get(index)));
}
return nodes;
}
/** Whether cpu socket affinity is requested on the nodes element; false when the attribute is absent. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
&& Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given jvm arguments to each container that has no jvm options assigned yet. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
containers.stream()
.filter(container -> container.getAssignedJvmOptions().isEmpty())
.forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute of the nodes element, if present, to every container. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
for (Container container : containers)
container.setPreLoad(preload);
}
}
/** Adds the search execution factory and the search handler (with default or user-specified bindings) to the cluster. */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.handler.SearchHandler");
String[] defaultBindings = {SEARCH_HANDLER_BINDING};
// User-declared <binding> children override the default binding entirely
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
/** Adds the query-builder GUI handler. NOTE(review): the binding literal below appears truncated in this extract. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
guiHandler.addServerBindings("http:
cluster.addComponent(guiHandler);
}
/** Returns the user-declared bindings of the given element, or the defaults if it declares none. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
List<Element> bindingElements = XML.getChildren(searchElement, "binding");
return bindingElements.isEmpty() ? defaultBindings : toBindingList(bindingElements);
}
/** Returns the trimmed, non-empty text content of the given binding elements as an array. */
private String[] toBindingList(List<Element> bindingElements) {
return bindingElements.stream()
.map(element -> element.getTextContent().trim())
.filter(text -> !text.isEmpty())
.toArray(String[]::new); // idiomatic replacement for toArray(new String[size])
}
/** Builds the document-api of this cluster, or returns null if the spec has no document-api element. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/** Builds the document-processing setup of this cluster, or returns null if the spec has no document-processing element. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
// The mbus part is disabled for standalone builds
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
/** Expands every &lt;include&gt; child of the given element in place; requires an application package. */
private void addIncludes(Element parentElement) {
List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
if (includes == null || includes.isEmpty()) return;
if (app == null)
throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
for (Element include : includes)
addInclude(parentElement, include);
}
/**
 * Expands a single include: reads all XML files from the referenced application-package directory
 * and appends (deep copies of) their child elements to the parent element.
 */
private void addInclude(Element parentElement, Element include) {
String dirName = include.getAttribute(IncludeDirs.DIR);
app.validateIncludeDir(dirName);
List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
for (Element includedFile : includedFiles) {
List<Element> includedSubElements = XML.getChildren(includedFile);
for (Element includedSubElement : includedSubElements) {
// importNode is required to re-parent nodes across Document instances
Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
parentElement.appendChild(copiedNode);
}
}
}
/** Builds and adds one component per child of the given name under spec. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
Element spec, String componentName) {
XML.getChildren(spec, componentName)
.forEach(node -> cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)));
}
/** Like addConfiguredComponents, but runs the given validator on each element before building it. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
ContainerCluster<? extends Container> cluster,
Element spec, String componentName,
Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node); // throws exception here if something is wrong
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
/**
 * Adds an Athenz identity provider component to the cluster when the deployment spec declares an
 * Athenz domain, and tags every container with the identity domain/service properties.
 *
 * @throws RuntimeException if a domain is declared but no Athenz service can be resolved for this instance/zone
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
// Zone-specific service declaration wins over the spec-wide default
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's hostname, then "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
if (loadbalancerName != null) return loadbalancerName;
String fallback = configServerSpecs.stream()
.findFirst()
.map(ConfigServerSpec::getHostName)
.orElse("unknown");
return HostName.from(fallback);
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
String idAttr = element.getAttribute("id");
// These ids are taken by the built-in renderers registered in RendererRegistry
if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
}
/** Returns whether the given element is a container cluster tag ("container" or the deprecated "jdisc"). */
public static boolean isContainerTag(Element element) {
String tagName = element.getTagName();
return CONTAINER_TAG.equals(tagName) || DEPRECATED_CONTAINER_TAG.equals(tagName);
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Default status file served by the hosted status handler; may be overridden by the env setting below.
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Environment variable that overrides HOSTED_VESPA_STATUS_FILE when set.
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
static final String SEARCH_HANDLER_CLASS = com.yahoo.search.handler.SearchHandler.class.getName();
// NOTE(review): this binding literal appears truncated in this extract.
static final String SEARCH_HANDLER_BINDING = "http:
// Whether containers open network ports (standalone tests disable this).
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// The XML tags this builder handles ("container" and the deprecated "jdisc").
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
// Renderer ids reserved for the built-in renderers.
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container (disables the rpc server/message bus)
 * @param networking whether containers should open network ports (enables the http server)
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the XML element ids this builder handles. */
@Override
public List<ConfigModelId> handlesElements() { return configModelIds; }
/** Builds the container model: creates the cluster from the spec, fills in its content, and wires feature flags. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
/** Registers the platform bundle of every component whose class maps to a known bundle path. */
private void addBundlesForPlatformComponents(ApplicationContainerCluster cluster) {
for (Component<?, ?> component : cluster.getAllComponents()) {
BundleMapper.getBundlePath(component.model.bundleInstantiationSpec.getClassName())
.ifPresent(cluster::addPlatformBundle);
}
}
/** Creates the (empty) container cluster config producer for this spec. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
// Anonymous builder so the generic DomConfigProducerBuilder machinery handles producer wiring
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the spec: components, APIs, chains, handlers, http setup,
 * access logs, nodes and hosted-Vespa identity wiring.
 * NOTE(review): ordering matters here, e.g. addHttp must precede addNodes so connectors exist
 * before containers are created — confirm before reordering.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);  // Must be added after nodes
}
/** Configures a secret store on the cluster when the spec declares a secret-store element. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement == null) return;
SecretStore secretStore = new SecretStore();
XML.getChildren(secretStoreElement, "group")
.forEach(group -> secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")));
cluster.setSecretStore(secretStore);
}
/** On hosted Vespa, wires the Athenz identity provider and rotation properties from the deployment spec. */
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
/** Tags every container with its rotation names and whether this zone has an active rotation. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec));
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", activeRotation);
});
}
/** Returns whether the deployment spec declares this zone as active for this application instance. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
return spec.instance(app.getApplicationId().instance())
.map(instance -> instance.zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active()))
.orElse(false);
}
/** Sets the "rotations" property on the container to the endpoint names targeting this cluster, comma-separated. */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toUnmodifiableSet());
// Build the comma delimited list of endpoints this container should be known as
container.setProp("rotations", String.join(",", rotationsProperty));
}
/** Records service/endpoint aliases from the aliases element on the cluster; prod environment only. */
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
// aliases may be null here; presumably XML.getChildren tolerates a null parent — verify if changing
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
/** Adds user components declared directly under spec and inside any components elements (expanding includes). */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element componentsElement : XML.getChildren(spec, "components")) {
addIncludes(componentsElement);
addConfiguredComponents(deployState, cluster, componentsElement, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds the status handler: on hosted Vespa a file-backed handler serving the load balancer status file
 * (overridable via VESPA_LB_STATUS_FILE), otherwise the standard VIP handler.
 * NOTE(review): the binding literal below appears truncated in this extract.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
/** Builds and adds one client provider component per client element in the spec. */
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
XML.getChildren(spec, "client")
.forEach(clientElement -> cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientElement)));
}
/** Adds user-declared server provider components from the spec's server elements. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/** Adds declared access logs (unless disabled), or the default search access log when none are declared. */
private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
}
/** Returns the accesslog elements declared directly under the given spec. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/** Configures http from the spec, and for hosted tenant applications adds the implicit server and TLS connector. */
private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addAdditionalHostedConnector(deployState, cluster);
}
}
/**
 * Adds the hosted TLS connector: with provisioned endpoint certificates (optionally requiring a
 * client truststore in public systems) or with default certificate/truststore.
 * NOTE(review): the URL in the error message below appears truncated in this extract.
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer();
String serverName = server.getComponentId().getName();
String proxyProtocol = deployState.getProperties().proxyProtocol();
if (deployState.endpointCertificateSecrets().isPresent()) {
// Public systems require mutual TLS, so a client CA must be present in the package
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
HostedSslConnectorFactory connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(proxyProtocol, serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
: HostedSslConnectorFactory.withProvidedCertificate(proxyProtocol, serverName, endpointCertificateSecrets);
server.addConnector(connectorFactory);
} else {
server.addConnector(HostedSslConnectorFactory.withDefaultCertificateAndTruststore(proxyProtocol, serverName));
}
}
/** Whether this is a hosted, non-tester tenant application of the default application type. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
DeployState deployState = context.getDeployState();
boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
return deployState.isHosted()
&& context.getApplicationType() == ApplicationType.DEFAULT
&& !isTesterApplication;
}
/**
 * Ensures the cluster has an http setup and an http server on hosted Vespa: creates an implicit http
 * (with access control when an Athenz domain is configured) and a default server if none exist.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if(cluster.getHttp() == null) {
Http http = deployState.getProperties().athenzDomain()
.map(tenantDomain -> createHostedImplicitHttpWithAccessControl(deployState, tenantDomain, cluster))
.orElseGet(() -> createHostedImplicitHttpWithoutAccessControl(cluster));
cluster.setHttp(http);
}
if(cluster.getHttp().getHttpServer() == null) {
JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
cluster.getHttp().setHttpServer(defaultHttpServer);
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", Defaults.getDefaults().vespaWebServicePort()));
}
}
/** Creates the implicit hosted http setup with Athenz access control (read and write initially disabled). */
private static Http createHostedImplicitHttpWithAccessControl(
DeployState deployState, AthenzDomain tenantDomain, ApplicationContainerCluster cluster) {
AccessControl accessControl =
new AccessControl.Builder(tenantDomain.value(), deployState.getDeployLogger())
.setHandlers(cluster)
.readEnabled(false)
.writeEnabled(false)
.build();
Http http = new Http(accessControl.getBindings(), accessControl);
// The access control filter chain itself is empty; bindings carry the policy
FilterChains filterChains = new FilterChains(cluster);
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
http.setFilterChains(filterChains);
return http;
}
/** Creates the implicit hosted http setup with no bindings and empty filter chains. */
private static Http createHostedImplicitHttpWithoutAccessControl(ApplicationContainerCluster cluster) {
Http http = new Http(Collections.emptyList());
http.setFilterChains(new FilterChains(cluster));
return http;
}
/** Builds the http setup from the http element; removes all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Builds and adds one rest-api per rest-api element in the spec. */
private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element restApiElement : XML.getChildren(spec, "rest-api"))
cluster.addRestApi(new RestApiBuilder().build(deployState, cluster, restApiElement));
}
/** Builds and adds one servlet per servlet element in the spec. */
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element servletElem : XML.getChildren(spec, "servlet"))
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
/** Sets the document-api on the cluster when the spec declares one. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
if (documentApi != null)
cluster.setDocumentApi(documentApi);
}
/** Sets docproc on the cluster when declared, and propagates its options as message bus parameters. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Configures search when declared: chains, search/GUI handlers, and user-declared renderers (ids validated). */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Enables stateless model evaluation when declared, using the model's rank profiles when available. */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Configures processing chains when declared, with default or user-declared bindings and validated renderers. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the search setup: chains from the spec plus query profiles, semantic rules and page templates from the package. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
/** Validates and applies the page templates found in the application package to the search setup. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Builds and adds one user handler component per handler element in the spec. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element handlerElement : XML.getChildren(spec, "handler"))
cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement));
}
/** Verifies the version attribute is 1.0; any other value is rejected. */
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if (Version.fromString(version).equals(new Version(1))) return;
throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
/** Warns when the deprecated "jdisc" tag name is used instead of "container". */
private void checkTagName(Element spec, DeployLogger logger) {
if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) {
logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
}
/** Adds containers to the cluster: a single standalone node, or nodes parsed from the spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder) {
addStandaloneNode(cluster);
} else {
addNodesFromXml(cluster, spec, context);
}
}
/** Adds a single container named "standalone", indexed after any existing containers. */
private void addStandaloneNode(ApplicationContainerCluster cluster) {
int index = cluster.getContainers().size();
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", index, cluster.isHostedVespa());
cluster.addContainers(List.of(container));
}
/** Returns whether the given jvm arguments select a GC algorithm or CMS flags, which conflict with jvm-gc-options. */
static boolean incompatibleGCOptions(String jvmargs) {
boolean selectsGcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC").matcher(jvmargs).find();
boolean selectsCmsFlags = Pattern.compile("-XX:[-+]*CMS").matcher(jvmargs).find();
return selectsGcAlgorithm || selectsCmsFlags;
}
/**
 * Returns the effective GC options: the explicitly given options when present,
 * null (JVM default) in dev systems and on hosted Vespa, and G1GC otherwise.
 */
private static String buildJvmGCOptions(Zone zone, String jvmGCOptions, boolean isHostedVespa) {
if (jvmGCOptions != null) return jvmGCOptions;
if (zone.system() == SystemName.dev || isHostedVespa) return null;
return ContainerCluster.G1GC;
}
// Extracts JVM options from the nodes element. 'jvm-options' takes precedence; combining it
// with the deprecated 'jvmargs' is an error. GC flags found inside 'jvmargs' are overridden
// with the G1 default and reported to the deployer.
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
    String jvmOptions = "";
    if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
        jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
        if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
            String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
            throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
                    " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
        }
    } else {
        // Deprecated path: jvmargs only
        jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
        if (incompatibleGCOptions(jvmOptions)) {
            deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
            cluster.setJvmGCOptions(ContainerCluster.G1GC);
        }
    }
    return jvmOptions;
}
// Applies JVM settings given as attributes directly on the <nodes> tag (legacy form,
// before the nested <jvm> element existed).
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                  Element nodesElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
    // Only set GC options here if getJvmOptions did not already force a default
    if (!cluster.getJvmGCOptions().isPresent()) {
        String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                ? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                : null;
        cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
    }
    applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
// Applies JVM settings given in the nested <jvm> element under <nodes>.
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                   Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String jvmGCOptions = jvmElement.hasAttribute(VespaDomBuilder.GC_OPTIONS)
            ? jvmElement.getAttribute(VespaDomBuilder.GC_OPTIONS)
            : null;
    cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
// Populates the cluster with containers: implicitly when no <nodes> tag is given, otherwise
// from the <nodes> element including JVM settings, environment variables and CPU affinity.
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {
        cluster.addContainers(allocateWithoutNodesTag(cluster, containerElement, context));
    } else {
        List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);
        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null) {
            // Legacy form: JVM settings as attributes on <nodes> itself
            extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
        } else {
            extractJvmTag(nodes, cluster, jvmElement, context);
        }
        applyRoutingAliasProperties(nodes, cluster);
        applyDefaultPreload(nodes, nodesElement);
        String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
        if (environmentVars != null && !environmentVars.isEmpty()) {
            cluster.setEnvironmentVars(environmentVars);
        }
        if (useCpuSocketAffinity(nodesElement))
            AbstractService.distributeCpuSocketAffinity(nodes);
        cluster.addContainers(nodes);
    }
}
// Serializes the children of the environment-variables element to "NAME=value " pairs.
// Note: every pair, including the last, is followed by a single space; a null element
// yields the empty string.
private static String getEnvironmentVariables(Element environmentVariables) {
    if (environmentVariables == null) return "";
    StringBuilder serialized = new StringBuilder();
    for (Element variable : XML.getChildren(environmentVariables))
        serialized.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
    return serialized.toString();
}
// Dispatches on the attributes of the <nodes> element to decide how containers are allocated.
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type")) // allocate by node type
        return createNodesFromNodeType(cluster, nodesElement, context);
    else if (nodesElement.hasAttribute("of")) // reuse hosts of a referenced content cluster
        return createNodesFromContentServiceReference(cluster, nodesElement, context);
    else if (nodesElement.hasAttribute("count")) // request a node count
        return createNodesFromNodeCount(cluster, nodesElement, context);
    else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()) // hosted dev/perf: implicit count
        return createNodesFromNodeCount(cluster, nodesElement, context);
    else // explicit <node> list
        return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
// Propagates the cluster's service and endpoint aliases to each container as comma-separated
// node properties. The joined string is computed once instead of once per container, and
// String.join replaces the stream/Collectors.joining boilerplate.
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
// Parses a memory percentage such as "60%" and applies it to the cluster.
// Null or empty means "not specified" and is ignored; anything else must be an
// integer immediately followed by '%'.
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    String trimmed = memoryPercentage.trim();
    if ( ! trimmed.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    String digits = trimmed.substring(0, trimmed.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(digits));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException(errorMessage);
    }
}
/** Allocates a single-node container cluster on the given host, used when there is no nodes tag. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
    node.setHostResource(host);
    node.initService(context.getDeployLogger());
    return List.of(node);
}
// Provisions containers from a node count/resource specification on the <nodes> element.
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                              ClusterSpec.Type.container,
                                                                              ClusterSpec.Id.from(cluster.getName()),
                                                                              log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
// Allocates containers on all hosts of the given node type (the 'type' attribute).
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
            .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
            .dockerImageRepo(context.getDeployState().getWantedDockerImageRepo())
            .build();
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
                                                         Capacity.fromRequiredNodeType(type), 1, log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
// Co-locates containers on the hosts of the content cluster referenced by the 'of' attribute.
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    } catch (IllegalArgumentException e) {
        // Rewrap to point at the referring cluster rather than the raw spec
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodeSpecification,
                                        referenceId,
                                        cluster.getRoot().hostSystem(),
                                        context.getDeployLogger());
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ApplicationContainerCluster cluster, Element containersElement, ConfigModelContext context) {
    Optional<Element> services = servicesRootOf(containersElement);
    if ( ! services.isPresent())
        return Optional.empty();
    List<Element> contentServices = XML.getChildren(services.get(), "content");
    if ( contentServices.isEmpty() ) return Optional.empty();
    Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
    NodesSpecification nodesSpec;
    if (contentNodesElementOrNull == null)
        nodesSpec = NodesSpecification.nonDedicated(1, context);
    else
        nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
    // Provision against the first content cluster and take an arbitrary (first) host
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodesSpec,
                                        contentServices.get(0).getAttribute("id"),
                                        cluster.getRoot().hostSystem(),
                                        context.getDeployLogger());
    return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
    // Walk the ancestor chain; stop at the first non-element (or missing) parent.
    for (Node ancestor = element.getParentNode(); ancestor instanceof Element; ancestor = ancestor.getParentNode()) {
        Element ancestorElement = (Element) ancestor;
        if ("services".equals(ancestorElement.getTagName())) return Optional.of(ancestorElement);
    }
    return Optional.empty();
}
// Wraps each provisioned host in an ApplicationContainer named "container.<index>".
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
        String id = "container." + entry.getValue().index();
        ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
        container.setHostResource(entry.getKey());
        container.initService(deployLogger);
        nodes.add(container);
    }
    return nodes;
}
// Builds one container per explicit <node> child, indexed in document order.
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>(nodeElements.size());
    for (int index = 0; index < nodeElements.size(); index++) {
        String id = "container." + index;
        nodes.add(new ContainerServiceBuilder(id, index).build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
// True only when the nodes element explicitly enables cpu socket affinity; an absent
// attribute means disabled.
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
// Prepends the given JVM options to every container which has none assigned yet,
// so per-node options are not overridden.
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    for (Container container: containers) {
        if (container.getAssignedJvmOptions().isEmpty())
            container.prependJvmOptions(jvmArgs);
    }
}
// Applies the preload attribute from <nodes> to every container; a missing attribute is a no-op.
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    for (Container container: containers)
        container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
// Registers the search chain execution factory and the search handler, bound to the
// bindings declared under the search element (or the default binding when none are given).
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
    cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
                                                 "com.yahoo.search.searchchain.ExecutionFactory"));
    ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(cluster.getSearch().getChains(),
                                                                            "com.yahoo.search.handler.SearchHandler");
    String[] defaultBindings = {SEARCH_HANDLER_BINDING};
    for (String binding: serverBindings(searchElement, defaultBindings)) {
        searchHandler.addServerBindings(binding);
    }
    cluster.addComponent(searchHandler);
}
// Registers the GUI (query builder) handler on the cluster.
private void addGUIHandler(ApplicationContainerCluster cluster) {
    Handler<?> guiHandler = new GUIHandler();
    // NOTE(review): this string literal appears truncated (unterminated) in this copy of the
    // source — presumably a full "http://..." binding pattern; verify against the upstream file.
    guiHandler.addServerBindings("http:
    cluster.addComponent(guiHandler);
}
// Returns the bindings declared under the given element, or the supplied defaults when
// no <binding> children exist.
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> declaredBindings = XML.getChildren(searchElement, "binding");
    return declaredBindings.isEmpty() ? defaultBindings : toBindingList(declaredBindings);
}
// Extracts the trimmed, non-empty text content of each binding element.
// Stream + String[]::new replaces the manual accumulator and the presized
// toArray(new String[size]) call (the zero-length-array form is the recommended idiom).
private String[] toBindingList(List<Element> bindingElements) {
    return bindingElements.stream()
                          .map(element -> element.getTextContent().trim())
                          .filter(text -> ! text.isEmpty())
                          .toArray(String[]::new);
}
// Builds the document-api setup from the <document-api> element, or null when absent.
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    return new ContainerDocumentApi(cluster, documentApiOptions);
}
// Builds the document-processing setup from the <document-processing> element, or null when absent.
// Include directives under the element are expanded before the chains are built.
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null)
        return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
    return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
// Expands all <include> children of the given element in place.
// Requires an application package to resolve the included directories against.
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) {
        return;
    }
    if (app == null) {
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    }
    for (Element include : includes) {
        addInclude(parentElement, include);
    }
}
// Expands a single include: reads all XML files in the referenced directory and appends
// their top-level children to the parent element.
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
    for (Element includedFile : includedFiles) {
        List<Element> includedSubElements = XML.getChildren(includedFile);
        for (Element includedSubElement : includedSubElements) {
            // importNode is required to move nodes between documents
            Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
            parentElement.appendChild(copiedNode);
        }
    }
}
// Adds every child element with the given name as a configured component of the cluster.
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    for (Element node : XML.getChildren(spec, componentName)) {
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
    }
}
// Like addConfiguredComponents, but runs the given validator on each element first
// (the validator signals rejection by throwing).
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element node : XML.getChildren(spec, componentName)) {
        elementValidator.accept(node);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
    }
}
// When the deployment spec declares an Athenz domain, adds an identity provider component
// and tags each container with its identity domain and service.
private void addIdentityProvider(ApplicationContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain()
        .ifPresent(domain -> {
            // Service resolution order: instance- and zone-specific service, then the spec-wide default
            AthenzService service = spec.instance(app.getApplicationId().instance())
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> spec.athenzService())
                    .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"));
            String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
            IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
            cluster.addComponent(identityProvider);
            cluster.getContainers().forEach(container -> {
                container.setProp("identity.domain", domain.value());
                container.setProp("identity.service", service.value());
            });
        });
}
// Uses the given load balancer name when set; otherwise falls back to the first config
// server's hostname, or "unknown" when there is none.
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    // These ids are taken by the built-in renderers
    if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
    }
}
/** Returns whether the given element uses the container tag, under its current or deprecated name. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName) || DEPRECATED_CONTAINER_TAG.equals(tagName);
}
} | |
This will result in tokens like "yum" "install" "--enablerepo=" "non-core" "--asumeyes" "nano" which it parses as enable repo empty string, and `non-core` as yum command. | public boolean installFixedVersion(TaskContext context, YumPackageName yumPackage, String... repos) {
    // Installs (or, when needed, downgrades) the package to the exact given version and
    // version-locks it. Returns false only when the package was already locked and installed
    // at that version.
    String targetVersionLockName = yumPackage.toVersionLockName();
    // Scan existing version locks: keep ours if it already matches; delete any stale lock
    // for the same package name.
    boolean alreadyLocked = terminal
            .newCommandLine(context)
            .add("yum", "--quiet", "versionlock", "list")
            .executeSilently()
            .getOutputLinesStream()
            .map(YumPackageName::parseString)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .anyMatch(packageName -> {
                if (packageName.getName().equals(yumPackage.getName())) {
                    String versionLockName = packageName.toVersionLockName();
                    if (versionLockName.equals(targetVersionLockName)) {
                        return true;
                    } else {
                        terminal.newCommandLine(context)
                                .add("yum", "versionlock", "delete", versionLockName)
                                .execute();
                    }
                }
                return false;
            });
    boolean modified = false;
    if (!alreadyLocked) {
        terminal.newCommandLine(context)
                .add("yum", "versionlock", "add", targetVersionLockName)
                .execute();
        modified = true;
    }
    // Each repo must be passed as a single "--enablerepo=<repo>" token: passing
    // "--enablerepo=" and the repo name as two arguments makes yum read an empty repo
    // name and then misparse the repo name as the yum command.
    CommandLine installCommand = terminal.newCommandLine(context).add("yum", "install");
    for (String repo : repos) installCommand.add("--enablerepo=" + repo);
    installCommand.add("--assumeyes", yumPackage.toName());
    String output = installCommand.executeSilently().getUntrimmedOutput();
    if (NOTHING_TO_DO_PATTERN.matcher(output).find()) {
        if (CHECKING_FOR_UPDATE_PATTERN.matcher(output).find()) {
            // The wanted (older) version may only exist in the extra repos, so they must
            // be enabled for the downgrade as well.
            CommandLine downgradeCommand = terminal.newCommandLine(context).add("yum", "downgrade", "--assumeyes");
            for (String repo : repos) downgradeCommand.add("--enablerepo=" + repo);
            downgradeCommand.add(yumPackage.toName()).execute();
            modified = true;
        } else {
            // Wanted version already installed: nothing to change
        }
    } else {
        installCommand.recordSilentExecutionAsSystemModification();
        modified = true;
    }
    return modified;
} | for (String repo : repos) commandLine.add("--enablerepo=", repo); | public boolean installFixedVersion(TaskContext context, YumPackageName yumPackage, String... repos) {
    // Installs (or, when needed, downgrades) the package to the exact given version and
    // version-locks it. Returns false only when the package was already locked and
    // installed at that version.
    String targetVersionLockName = yumPackage.toVersionLockName();
    // Scan existing version locks: keep ours if it already matches; delete any stale lock
    // for the same package name.
    boolean alreadyLocked = terminal
            .newCommandLine(context)
            .add("yum", "--quiet", "versionlock", "list")
            .executeSilently()
            .getOutputLinesStream()
            .map(YumPackageName::parseString)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .anyMatch(packageName -> {
                if (packageName.getName().equals(yumPackage.getName())) {
                    String versionLockName = packageName.toVersionLockName();
                    if (versionLockName.equals(targetVersionLockName)) {
                        return true;
                    } else {
                        terminal.newCommandLine(context)
                                .add("yum", "versionlock", "delete", versionLockName)
                                .execute();
                    }
                }
                return false;
            });
    boolean modified = false;
    if (!alreadyLocked) {
        terminal.newCommandLine(context)
                .add("yum", "versionlock", "add", targetVersionLockName)
                .execute();
        modified = true;
    }
    // Each repo is passed as a single "--enablerepo=<repo>" token
    var installCommand = terminal.newCommandLine(context).add("yum", "install");
    for (String repo : repos) installCommand.add("--enablerepo=" + repo);
    installCommand.add("--assumeyes", yumPackage.toName());
    String output = installCommand.executeSilently().getUntrimmedOutput();
    if (NOTHING_TO_DO_PATTERN.matcher(output).find()) {
        if (CHECKING_FOR_UPDATE_PATTERN.matcher(output).find()) {
            // An older version than the installed one is wanted: downgrade, with the same
            // extra repos enabled since the wanted version may only exist there.
            var upgradeCommand = terminal.newCommandLine(context).add("yum", "downgrade", "--assumeyes");
            for (String repo : repos) upgradeCommand.add("--enablerepo=" + repo);
            upgradeCommand.add(yumPackage.toName()).execute();
            modified = true;
        } else {
            // Wanted version already installed: nothing to change
        }
    } else {
        installCommand.recordSilentExecutionAsSystemModification();
        modified = true;
    }
    return modified;
} | class Yum {
    // Wrapper around the yum/rpm command line tools for idempotent package management.
    // Matches yum's "already installed ... Checking for update." line, which signals that
    // a downgrade rather than an install is required.
    private static final Pattern CHECKING_FOR_UPDATE_PATTERN =
            Pattern.compile("(?dm)^Package matching [^ ]+ already installed\\. Checking for update\\.$");
    private static final Pattern NOTHING_TO_DO_PATTERN = Pattern.compile("(?dm)^Nothing to do$");
    // No-op detection patterns per yum subcommand
    private static final Pattern INSTALL_NOOP_PATTERN = NOTHING_TO_DO_PATTERN;
    private static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
    private static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No Packages marked for removal$");
    private static final Pattern UNKNOWN_PACKAGE_PATTERN = Pattern.compile(
            "(?dm)^No package ([^ ]+) available\\.$");
    // rpm --queryformat template producing one output line per field, in this order
    private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
            .map(formatter -> "%{" + formatter + "}")
            .collect(Collectors.joining("\\n"));
    // Yields the builder setters in the same order as RPM_QUERYFORMAT emits field lines
    private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
            PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
            builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
    private final Terminal terminal;
    public Yum(Terminal terminal) {
        this.terminal = terminal;
    }
    /** Returns the installed package matching the given name, or empty if it is not installed. */
    public Optional<YumPackageName> queryInstalled(TaskContext context, String packageName) {
        CommandResult commandResult = terminal.newCommandLine(context)
                .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
                .ignoreExitCode()
                .executeSilently();
        // rpm exits non-zero when the package is not installed
        if (commandResult.getExitCode() != 0) return Optional.empty();
        YumPackageName.Builder builder = new YumPackageName.Builder();
        List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
        // rpm prints "(none)" for unset fields; map those to absent
        List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
        if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
                "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
        IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
        return Optional.of(builder.build());
    }
    /** Returns a command which installs the given packages when converged. */
    public GenericYumCommand install(YumPackageName... packages) {
        return newYumCommand("install", packages, INSTALL_NOOP_PATTERN);
    }
    public GenericYumCommand install(String package1, String... packages) {
        return install(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand install(List<String> packages) {
        return install(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a command which upgrades the given packages (all packages, if none given) when converged. */
    public GenericYumCommand upgrade(YumPackageName... packages) {
        return newYumCommand("upgrade", packages, UPGRADE_NOOP_PATTERN);
    }
    public GenericYumCommand upgrade(String package1, String... packages) {
        return upgrade(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand upgrade(List<String> packages) {
        return upgrade(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a command which removes the given packages when converged. */
    public GenericYumCommand remove(YumPackageName... packages) {
        return newYumCommand("remove", packages, REMOVE_NOOP_PATTERN);
    }
    public GenericYumCommand remove(String package1, String... packages) {
        return remove(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand remove(List<String> packages) {
        return remove(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    // Parses package1 followed by the remaining packages into a single array
    static YumPackageName[] toYumPackageNameArray(String package1, String... packages) {
        YumPackageName[] array = new YumPackageName[1 + packages.length];
        array[0] = YumPackageName.fromString(package1);
        for (int i = 0; i < packages.length; ++i) {
            array[1 + i] = YumPackageName.fromString(packages[i]);
        }
        return array;
    }
    private GenericYumCommand newYumCommand(String yumCommand, YumPackageName[] packages, Pattern noopPattern) {
        return new GenericYumCommand(terminal, yumCommand, List.of(packages), noopPattern);
    }
    /** A single yum invocation; converge() runs it and reports whether the system was modified. */
    public static class GenericYumCommand {
        private final Terminal terminal;
        private final String yumCommand;
        private final List<YumPackageName> packages;
        // Pattern in yum's output which means the command changed nothing
        private final Pattern commandOutputNoopPattern;
        private final List<String> enabledRepo = new ArrayList<>();
        private GenericYumCommand(Terminal terminal,
                                  String yumCommand,
                                  List<YumPackageName> packages,
                                  Pattern commandOutputNoopPattern) {
            this.terminal = terminal;
            this.yumCommand = yumCommand;
            this.packages = packages;
            this.commandOutputNoopPattern = commandOutputNoopPattern;
            // Only "upgrade" may run without explicit packages (meaning: upgrade everything)
            if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
                throw new IllegalArgumentException("No packages specified");
            }
        }
        /** Adds repositories to enable (--enablerepo=) for this command. */
        public GenericYumCommand enableRepos(String... repos) {
            enabledRepo.addAll(List.of(repos));
            return this;
        }
        /** Runs the command; returns true if the system was modified. */
        public boolean converge(TaskContext context) {
            CommandLine commandLine = terminal.newCommandLine(context);
            commandLine.add("yum", yumCommand, "--assumeyes");
            enabledRepo.forEach(repo -> commandLine.add("--enablerepo=" + repo));
            commandLine.add(packages.stream().map(YumPackageName::toName).collect(Collectors.toList()));
            boolean modifiedSystem = commandLine
                    .executeSilently()
                    .mapOutput(this::mapOutput);
            if (modifiedSystem) {
                commandLine.recordSilentExecutionAsSystemModification();
            }
            return modifiedSystem;
        }
        // Translates yum output into "did this modify the system", failing on unknown packages
        private boolean mapOutput(String output) {
            Matcher unknownPackageMatcher = UNKNOWN_PACKAGE_PATTERN.matcher(output);
            if (unknownPackageMatcher.find()) {
                throw new IllegalArgumentException("Unknown package: " + unknownPackageMatcher.group(1));
            }
            return !commandOutputNoopPattern.matcher(output).find();
        }
    }
} | class Yum {
    // Wrapper around the yum/rpm command line tools for idempotent package management.
    // Matches yum's "already installed ... Checking for update." line, which signals that
    // a downgrade rather than an install is required.
    private static final Pattern CHECKING_FOR_UPDATE_PATTERN =
            Pattern.compile("(?dm)^Package matching [^ ]+ already installed\\. Checking for update\\.$");
    private static final Pattern NOTHING_TO_DO_PATTERN = Pattern.compile("(?dm)^Nothing to do$");
    // No-op detection patterns per yum subcommand
    private static final Pattern INSTALL_NOOP_PATTERN = NOTHING_TO_DO_PATTERN;
    private static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
    private static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No Packages marked for removal$");
    private static final Pattern UNKNOWN_PACKAGE_PATTERN = Pattern.compile(
            "(?dm)^No package ([^ ]+) available\\.$");
    // rpm --queryformat template producing one output line per field, in this order
    private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
            .map(formatter -> "%{" + formatter + "}")
            .collect(Collectors.joining("\\n"));
    // Yields the builder setters in the same order as RPM_QUERYFORMAT emits field lines
    private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
            PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
            builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
    private final Terminal terminal;
    public Yum(Terminal terminal) {
        this.terminal = terminal;
    }
    /** Returns the installed package matching the given name, or empty if it is not installed. */
    public Optional<YumPackageName> queryInstalled(TaskContext context, String packageName) {
        CommandResult commandResult = terminal.newCommandLine(context)
                .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
                .ignoreExitCode()
                .executeSilently();
        // rpm exits non-zero when the package is not installed
        if (commandResult.getExitCode() != 0) return Optional.empty();
        YumPackageName.Builder builder = new YumPackageName.Builder();
        List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
        // rpm prints "(none)" for unset fields; map those to absent
        List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
        if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
                "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
        IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
        return Optional.of(builder.build());
    }
    /** Returns a command which installs the given packages when converged. */
    public GenericYumCommand install(YumPackageName... packages) {
        return newYumCommand("install", packages, INSTALL_NOOP_PATTERN);
    }
    public GenericYumCommand install(String package1, String... packages) {
        return install(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand install(List<String> packages) {
        return install(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a command which upgrades the given packages (all packages, if none given) when converged. */
    public GenericYumCommand upgrade(YumPackageName... packages) {
        return newYumCommand("upgrade", packages, UPGRADE_NOOP_PATTERN);
    }
    public GenericYumCommand upgrade(String package1, String... packages) {
        return upgrade(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand upgrade(List<String> packages) {
        return upgrade(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a command which removes the given packages when converged. */
    public GenericYumCommand remove(YumPackageName... packages) {
        return newYumCommand("remove", packages, REMOVE_NOOP_PATTERN);
    }
    public GenericYumCommand remove(String package1, String... packages) {
        return remove(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand remove(List<String> packages) {
        return remove(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    // Parses package1 followed by the remaining packages into a single array
    static YumPackageName[] toYumPackageNameArray(String package1, String... packages) {
        YumPackageName[] array = new YumPackageName[1 + packages.length];
        array[0] = YumPackageName.fromString(package1);
        for (int i = 0; i < packages.length; ++i) {
            array[1 + i] = YumPackageName.fromString(packages[i]);
        }
        return array;
    }
    private GenericYumCommand newYumCommand(String yumCommand, YumPackageName[] packages, Pattern noopPattern) {
        return new GenericYumCommand(terminal, yumCommand, List.of(packages), noopPattern);
    }
    /** A single yum invocation; converge() runs it and reports whether the system was modified. */
    public static class GenericYumCommand {
        private final Terminal terminal;
        private final String yumCommand;
        private final List<YumPackageName> packages;
        // Pattern in yum's output which means the command changed nothing
        private final Pattern commandOutputNoopPattern;
        private final List<String> enabledRepo = new ArrayList<>();
        private GenericYumCommand(Terminal terminal,
                                  String yumCommand,
                                  List<YumPackageName> packages,
                                  Pattern commandOutputNoopPattern) {
            this.terminal = terminal;
            this.yumCommand = yumCommand;
            this.packages = packages;
            this.commandOutputNoopPattern = commandOutputNoopPattern;
            // Only "upgrade" may run without explicit packages (meaning: upgrade everything)
            if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
                throw new IllegalArgumentException("No packages specified");
            }
        }
        /** Adds repositories to enable (--enablerepo=) for this command. */
        public GenericYumCommand enableRepos(String... repos) {
            enabledRepo.addAll(List.of(repos));
            return this;
        }
        /** Runs the command; returns true if the system was modified. */
        public boolean converge(TaskContext context) {
            CommandLine commandLine = terminal.newCommandLine(context);
            commandLine.add("yum", yumCommand, "--assumeyes");
            enabledRepo.forEach(repo -> commandLine.add("--enablerepo=" + repo));
            commandLine.add(packages.stream().map(YumPackageName::toName).collect(Collectors.toList()));
            boolean modifiedSystem = commandLine
                    .executeSilently()
                    .mapOutput(this::mapOutput);
            if (modifiedSystem) {
                commandLine.recordSilentExecutionAsSystemModification();
            }
            return modifiedSystem;
        }
        // Translates yum output into "did this modify the system", failing on unknown packages
        private boolean mapOutput(String output) {
            Matcher unknownPackageMatcher = UNKNOWN_PACKAGE_PATTERN.matcher(output);
            if (unknownPackageMatcher.find()) {
                throw new IllegalArgumentException("Unknown package: " + unknownPackageMatcher.group(1));
            }
            return !commandOutputNoopPattern.matcher(output).find();
        }
    }
} |
These repos also need to be handled when downgrade is required (Line 131) | public boolean installFixedVersion(TaskContext context, YumPackageName yumPackage, String... repos) {
String targetVersionLockName = yumPackage.toVersionLockName();
boolean alreadyLocked = terminal
.newCommandLine(context)
.add("yum", "--quiet", "versionlock", "list")
.executeSilently()
.getOutputLinesStream()
.map(YumPackageName::parseString)
.filter(Optional::isPresent)
.map(Optional::get)
.anyMatch(packageName -> {
if (packageName.getName().equals(yumPackage.getName())) {
String versionLockName = packageName.toVersionLockName();
if (versionLockName.equals(targetVersionLockName)) {
return true;
} else {
terminal.newCommandLine(context)
.add("yum", "versionlock", "delete", versionLockName)
.execute();
}
}
return false;
});
boolean modified = false;
if (!alreadyLocked) {
terminal.newCommandLine(context)
.add("yum", "versionlock", "add", targetVersionLockName)
.execute();
modified = true;
}
CommandLine commandLine = terminal.newCommandLine(context).add("yum", "install");
for (String repo : repos) commandLine.add("--enablerepo=", repo);
commandLine.add("--assumeyes", yumPackage.toName());
String output = commandLine.executeSilently().getUntrimmedOutput();
if (NOTHING_TO_DO_PATTERN.matcher(output).find()) {
if (CHECKING_FOR_UPDATE_PATTERN.matcher(output).find()) {
terminal.newCommandLine(context)
.add("yum", "downgrade", "--assumeyes", yumPackage.toName())
.execute();
modified = true;
} else {
}
} else {
commandLine.recordSilentExecutionAsSystemModification();
modified = true;
}
return modified;
} | for (String repo : repos) commandLine.add("--enablerepo=", repo); | public boolean installFixedVersion(TaskContext context, YumPackageName yumPackage, String... repos) {
        // Ensure exactly our version is version-locked: keep an existing matching
        // lock, delete any lock for a different version of the same package.
        String targetVersionLockName = yumPackage.toVersionLockName();
        boolean alreadyLocked = terminal
                .newCommandLine(context)
                .add("yum", "--quiet", "versionlock", "list")
                .executeSilently()
                .getOutputLinesStream()
                .map(YumPackageName::parseString)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .anyMatch(packageName -> {
                    if (packageName.getName().equals(yumPackage.getName())) {
                        String versionLockName = packageName.toVersionLockName();
                        if (versionLockName.equals(targetVersionLockName)) {
                            return true;
                        } else {
                            // A different version is locked — remove the stale lock.
                            terminal.newCommandLine(context)
                                    .add("yum", "versionlock", "delete", versionLockName)
                                    .execute();
                        }
                    }
                    return false;
                });
        boolean modified = false;
        if (!alreadyLocked) {
            terminal.newCommandLine(context)
                    .add("yum", "versionlock", "add", targetVersionLockName)
                    .execute();
            modified = true;
        }
        // "--enablerepo=<repo>" must be a single argument.
        var installCommand = terminal.newCommandLine(context).add("yum", "install");
        for (String repo : repos) installCommand.add("--enablerepo=" + repo);
        installCommand.add("--assumeyes", yumPackage.toName());
        String output = installCommand.executeSilently().getUntrimmedOutput();
        if (NOTHING_TO_DO_PATTERN.matcher(output).find()) {
            if (CHECKING_FOR_UPDATE_PATTERN.matcher(output).find()) {
                // A newer version is installed — downgrade to the locked version,
                // with the same extra repos enabled so the target is resolvable.
                var upgradeCommand = terminal.newCommandLine(context).add("yum", "downgrade", "--assumeyes");
                for (String repo : repos) upgradeCommand.add("--enablerepo=" + repo);
                upgradeCommand.add(yumPackage.toName()).execute();
                modified = true;
            } else {
                // Already installed at the locked version — nothing to do.
            }
        } else {
            installCommand.recordSilentExecutionAsSystemModification();
            modified = true;
        }
        return modified;
} | class Yum {
    // yum output when the package is already installed at another version and yum
    // goes on to check for an update instead of installing.
    private static final Pattern CHECKING_FOR_UPDATE_PATTERN =
            Pattern.compile("(?dm)^Package matching [^ ]+ already installed\\. Checking for update\\.$");
    private static final Pattern NOTHING_TO_DO_PATTERN = Pattern.compile("(?dm)^Nothing to do$");
    // Per-command patterns that indicate the yum invocation changed nothing.
    private static final Pattern INSTALL_NOOP_PATTERN = NOTHING_TO_DO_PATTERN;
    private static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
    private static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No Packages marked for removal$");
    private static final Pattern UNKNOWN_PACKAGE_PATTERN = Pattern.compile(
            "(?dm)^No package ([^ ]+) available\\.$");
    // rpm --queryformat printing one package-name component per line, in this order.
    private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
            .map(formatter -> "%{" + formatter + "}")
            .collect(Collectors.joining("\\n"));
    // Yields the builder setters in the same order as the RPM_QUERYFORMAT lines.
    private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
            PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
            builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
    private final Terminal terminal;
    public Yum(Terminal terminal) {
        this.terminal = terminal;
    }
    /** Returns the installed package matching packageName, or empty if not installed. */
    public Optional<YumPackageName> queryInstalled(TaskContext context, String packageName) {
        CommandResult commandResult = terminal.newCommandLine(context)
                .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
                .ignoreExitCode()
                .executeSilently();
        if (commandResult.getExitCode() != 0) return Optional.empty();
        YumPackageName.Builder builder = new YumPackageName.Builder();
        List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
        // "(none)" means the component (e.g. epoch) is unset for this package.
        List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
        if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
                "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
        IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
        return Optional.of(builder.build());
    }
    /**
     * Returns a "yum install" command for the given packages. Call
     * {@code converge} on the result to run it.
     */
    public GenericYumCommand install(YumPackageName... packages) {
        return newYumCommand("install", packages, INSTALL_NOOP_PATTERN);
    }
    public GenericYumCommand install(String package1, String... packages) {
        return install(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand install(List<String> packages) {
        return install(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a "yum upgrade" command; with no packages it upgrades everything. */
    public GenericYumCommand upgrade(YumPackageName... packages) {
        return newYumCommand("upgrade", packages, UPGRADE_NOOP_PATTERN);
    }
    public GenericYumCommand upgrade(String package1, String... packages) {
        return upgrade(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand upgrade(List<String> packages) {
        return upgrade(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a "yum remove" command for the given packages. */
    public GenericYumCommand remove(YumPackageName... packages) {
        return newYumCommand("remove", packages, REMOVE_NOOP_PATTERN);
    }
    public GenericYumCommand remove(String package1, String... packages) {
        return remove(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand remove(List<String> packages) {
        return remove(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    // Combines the mandatory first package and varargs rest into one parsed array.
    static YumPackageName[] toYumPackageNameArray(String package1, String... packages) {
        YumPackageName[] array = new YumPackageName[1 + packages.length];
        array[0] = YumPackageName.fromString(package1);
        for (int i = 0; i < packages.length; ++i) {
            array[1 + i] = YumPackageName.fromString(packages[i]);
        }
        return array;
    }
    private GenericYumCommand newYumCommand(String yumCommand, YumPackageName[] packages, Pattern noopPattern) {
        return new GenericYumCommand(terminal, yumCommand, List.of(packages), noopPattern);
    }
    /** A single yum invocation over a fixed set of packages. */
    public static class GenericYumCommand {
        private final Terminal terminal;
        private final String yumCommand;
        private final List<YumPackageName> packages;
        private final Pattern commandOutputNoopPattern;
        private final List<String> enabledRepo = new ArrayList<>();
        private GenericYumCommand(Terminal terminal,
                                  String yumCommand,
                                  List<YumPackageName> packages,
                                  Pattern commandOutputNoopPattern) {
            this.terminal = terminal;
            this.yumCommand = yumCommand;
            this.packages = packages;
            this.commandOutputNoopPattern = commandOutputNoopPattern;
            // Only "upgrade" may run without an explicit package list.
            if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
                throw new IllegalArgumentException("No packages specified");
            }
        }
        /** Adds repositories to enable (--enablerepo) for this invocation. */
        public GenericYumCommand enableRepos(String... repos) {
            enabledRepo.addAll(List.of(repos));
            return this;
        }
        /** Runs the command; returns true iff the system was modified. */
        public boolean converge(TaskContext context) {
            CommandLine commandLine = terminal.newCommandLine(context);
            commandLine.add("yum", yumCommand, "--assumeyes");
            enabledRepo.forEach(repo -> commandLine.add("--enablerepo=" + repo));
            commandLine.add(packages.stream().map(YumPackageName::toName).collect(Collectors.toList()));
            boolean modifiedSystem = commandLine
                    .executeSilently()
                    .mapOutput(this::mapOutput);
            if (modifiedSystem) {
                commandLine.recordSilentExecutionAsSystemModification();
            }
            return modifiedSystem;
        }
        // Throws on unknown packages, otherwise true iff output was not a no-op.
        private boolean mapOutput(String output) {
            Matcher unknownPackageMatcher = UNKNOWN_PACKAGE_PATTERN.matcher(output);
            if (unknownPackageMatcher.find()) {
                throw new IllegalArgumentException("Unknown package: " + unknownPackageMatcher.group(1));
            }
            return !commandOutputNoopPattern.matcher(output).find();
        }
    }
} | class Yum {
    // yum output when the package is already installed at another version and yum
    // goes on to check for an update instead of installing.
    private static final Pattern CHECKING_FOR_UPDATE_PATTERN =
            Pattern.compile("(?dm)^Package matching [^ ]+ already installed\\. Checking for update\\.$");
    private static final Pattern NOTHING_TO_DO_PATTERN = Pattern.compile("(?dm)^Nothing to do$");
    // Per-command patterns that indicate the yum invocation changed nothing.
    private static final Pattern INSTALL_NOOP_PATTERN = NOTHING_TO_DO_PATTERN;
    private static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
    private static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No Packages marked for removal$");
    private static final Pattern UNKNOWN_PACKAGE_PATTERN = Pattern.compile(
            "(?dm)^No package ([^ ]+) available\\.$");
    // rpm --queryformat printing one package-name component per line, in this order.
    private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
            .map(formatter -> "%{" + formatter + "}")
            .collect(Collectors.joining("\\n"));
    // Yields the builder setters in the same order as the RPM_QUERYFORMAT lines.
    private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
            PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
            builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
    private final Terminal terminal;
    public Yum(Terminal terminal) {
        this.terminal = terminal;
    }
    /** Returns the installed package matching packageName, or empty if not installed. */
    public Optional<YumPackageName> queryInstalled(TaskContext context, String packageName) {
        CommandResult commandResult = terminal.newCommandLine(context)
                .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
                .ignoreExitCode()
                .executeSilently();
        if (commandResult.getExitCode() != 0) return Optional.empty();
        YumPackageName.Builder builder = new YumPackageName.Builder();
        List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
        // "(none)" means the component (e.g. epoch) is unset for this package.
        List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
        if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
                "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
        IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
        return Optional.of(builder.build());
    }
    /**
     * Returns a "yum install" command for the given packages. Call
     * {@code converge} on the result to run it.
     */
    public GenericYumCommand install(YumPackageName... packages) {
        return newYumCommand("install", packages, INSTALL_NOOP_PATTERN);
    }
    public GenericYumCommand install(String package1, String... packages) {
        return install(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand install(List<String> packages) {
        return install(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a "yum upgrade" command; with no packages it upgrades everything. */
    public GenericYumCommand upgrade(YumPackageName... packages) {
        return newYumCommand("upgrade", packages, UPGRADE_NOOP_PATTERN);
    }
    public GenericYumCommand upgrade(String package1, String... packages) {
        return upgrade(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand upgrade(List<String> packages) {
        return upgrade(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    /** Returns a "yum remove" command for the given packages. */
    public GenericYumCommand remove(YumPackageName... packages) {
        return newYumCommand("remove", packages, REMOVE_NOOP_PATTERN);
    }
    public GenericYumCommand remove(String package1, String... packages) {
        return remove(toYumPackageNameArray(package1, packages));
    }
    public GenericYumCommand remove(List<String> packages) {
        return remove(packages.stream().map(YumPackageName::fromString).toArray(YumPackageName[]::new));
    }
    // Combines the mandatory first package and varargs rest into one parsed array.
    static YumPackageName[] toYumPackageNameArray(String package1, String... packages) {
        YumPackageName[] array = new YumPackageName[1 + packages.length];
        array[0] = YumPackageName.fromString(package1);
        for (int i = 0; i < packages.length; ++i) {
            array[1 + i] = YumPackageName.fromString(packages[i]);
        }
        return array;
    }
    private GenericYumCommand newYumCommand(String yumCommand, YumPackageName[] packages, Pattern noopPattern) {
        return new GenericYumCommand(terminal, yumCommand, List.of(packages), noopPattern);
    }
    /** A single yum invocation over a fixed set of packages. */
    public static class GenericYumCommand {
        private final Terminal terminal;
        private final String yumCommand;
        private final List<YumPackageName> packages;
        private final Pattern commandOutputNoopPattern;
        private final List<String> enabledRepo = new ArrayList<>();
        private GenericYumCommand(Terminal terminal,
                                  String yumCommand,
                                  List<YumPackageName> packages,
                                  Pattern commandOutputNoopPattern) {
            this.terminal = terminal;
            this.yumCommand = yumCommand;
            this.packages = packages;
            this.commandOutputNoopPattern = commandOutputNoopPattern;
            // Only "upgrade" may run without an explicit package list.
            if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
                throw new IllegalArgumentException("No packages specified");
            }
        }
        /** Adds repositories to enable (--enablerepo) for this invocation. */
        public GenericYumCommand enableRepos(String... repos) {
            enabledRepo.addAll(List.of(repos));
            return this;
        }
        /** Runs the command; returns true iff the system was modified. */
        public boolean converge(TaskContext context) {
            CommandLine commandLine = terminal.newCommandLine(context);
            commandLine.add("yum", yumCommand, "--assumeyes");
            enabledRepo.forEach(repo -> commandLine.add("--enablerepo=" + repo));
            commandLine.add(packages.stream().map(YumPackageName::toName).collect(Collectors.toList()));
            boolean modifiedSystem = commandLine
                    .executeSilently()
                    .mapOutput(this::mapOutput);
            if (modifiedSystem) {
                commandLine.recordSilentExecutionAsSystemModification();
            }
            return modifiedSystem;
        }
        // Throws on unknown packages, otherwise true iff output was not a no-op.
        private boolean mapOutput(String output) {
            Matcher unknownPackageMatcher = UNKNOWN_PACKAGE_PATTERN.matcher(output);
            if (unknownPackageMatcher.find()) {
                throw new IllegalArgumentException("Unknown package: " + unknownPackageMatcher.group(1));
            }
            return !commandOutputNoopPattern.matcher(output).find();
        }
    }
}
I think this line is for debugging. Remove this. | public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
return null;
}
if (template == null || template.length() == 0) {
warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println(docId);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
return null;
}
} catch (Exception e) {
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(ExceptionUtils.getStackTrace(e));
warn(sb.toString(), PigWarning.UDF_WARNING_1);
return null;
}
return json;
} | System.out.println(docId); | public String exec(Tuple tuple) throws IOException {
        // Empty or missing input: count as a failed operation and skip the row.
        if (tuple == null || tuple.size() == 0) {
            if (statusReporter != null) {
                statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
            }
            return null;
        }
        if (template == null || template.length() == 0) {
            if (statusReporter != null) {
                statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
            }
            warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
            return null;
        }
        if (operation == null) {
            warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
            return null;
        }
        String json = null;
        try {
            if (reporter != null) {
                reporter.progress(); // keep the Hadoop task from timing out on slow rows
            }
            Schema inputSchema = getInputSchema();
            Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
            String docId = TupleTools.toString(fields, template);
            json = create(operation, docId, fields, properties, inputSchema);
            if (json == null || json.length() == 0) {
                warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
                return null;
            }
        } catch (Exception e) {
            if (statusReporter != null) {
                statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
            }
            // Log the offending row with full stack trace, then skip the row rather
            // than failing the whole job.
            StringBuilder sb = new StringBuilder();
            sb.append("Caught exception processing input row: \n");
            sb.append(tuple.toString());
            sb.append("\nException: ");
            sb.append(getStackTraceAsString(e));
            warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
            return null;
        }
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
        }
        return json;
} | class VespaDocumentOperation extends EvalFunc<String> {
    /** The Vespa document-operation types this UDF can emit; serialized lower-case. */
    public enum Operation {
        DOCUMENT,
        PUT,
        ID,
        REMOVE,
        UPDATE;
        @Override
        public String toString() {
            return super.toString().toLowerCase();
        }
        /** Parses case-insensitively; throws IllegalArgumentException if unknown. */
        public static Operation fromString(String text) {
            for (Operation op : Operation.values()) {
                if (op.toString().equalsIgnoreCase(text)) {
                    return op;
                }
            }
            throw new IllegalArgumentException("Unknown operation: " + text);
        }
        /** Returns true iff text names a valid operation (case-insensitive). */
        public static boolean valid(String text) {
            for (Operation op : Operation.values()) {
                if (op.toString().equalsIgnoreCase(text)) {
                    return true;
                }
            }
            return false;
        }
    }
    // Property keys accepted as UDF constructor arguments ("key=value" strings).
    private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
    private static final String PROPERTY_ID_TEMPLATE = "docid";
    private static final String PROPERTY_OPERATION = "operation";
    private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
    private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
    private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
    private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
    private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
    private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
    private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
    private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
    private static final String EXCLUDE_FIELDS = "exclude-fields";
    private static final String TESTSET_CONDITION = "condition";
    // Partial-update operation names from Vespa's JSON document API.
    private static final String PARTIAL_UPDATE_ASSIGN = "assign";
    private static final String PARTIAL_UPDATE_ADD = "add";
    private static final String PARTIAL_UPDATE_REMOVE = "remove";
    private final String template;     // document id template, e.g. "id:ns:type::<field>"
    private final Operation operation; // defaults to "put" when not configured
    private final Properties properties;
    public VespaDocumentOperation(String... params) {
        properties = VespaConfiguration.loadProperties(params);
        template = properties.getProperty(PROPERTY_ID_TEMPLATE);
        operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
    }
@Override
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
    @SuppressWarnings("unchecked")
    // Writes "name": <value>, honoring exclusion (shouldWriteField), bag-as-map
    // partial updates ("field{key}" explosion) and partial-update wrapping for
    // top-level fields of an update operation.
    private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
        if (shouldWriteField(name, properties, depth)) {
            // Map-valued fields configured for add/remove are exploded into one
            // "field{key}" entry per (key, value) bag element.
            if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false) ||
                isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_ASSIGN, false)){
                schema = (schema != null) ? schema.getField(0).schema : null;
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                DataBag bag = (DataBag) value;
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue; // expect (key, value) pairs only
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        g.writeFieldName(name + "{" + k + "}");
                        if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false)) {
                            // Removal entries carry a literal {"remove": 0} marker.
                            g.writeStartObject();
                            g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                            g.writeNumber(0);
                            g.writeEndObject();
                        }else{
                            if (shouldWritePartialUpdate(op, depth)) {
                                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth+1);
                            } else {
                                writeValue(v, t, g, name, properties, valueSchema, op, depth+1);
                            }
                        }
                    }
                }
            }else{
                g.writeFieldName(name);
                if (shouldWritePartialUpdate(op, depth)) {
                    writePartialUpdate(value, type, g, name, properties, schema, op, depth);
                } else {
                    writeValue(value, type, g, name, properties, schema, op, depth);
                }
            }
        }
    }
    @SuppressWarnings("unchecked")
    // Serializes a single Pig value to JSON according to its Pig DataType,
    // recursing through maps, tuples and bags.
    private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
        switch (type) {
            case DataType.UNKNOWN:
                break; // unknown types are silently skipped
            case DataType.NULL:
                g.writeNull();
                break;
            case DataType.BOOLEAN:
                g.writeBoolean((boolean) value);
                break;
            case DataType.INTEGER:
                g.writeNumber((int) value);
                break;
            case DataType.LONG:
                g.writeNumber((long) value);
                break;
            case DataType.FLOAT:
                g.writeNumber((float) value);
                break;
            case DataType.DOUBLE:
                g.writeNumber((double) value);
                break;
            case DataType.DATETIME:
                g.writeNumber(((DateTime) value).getMillis());
                break;
            case DataType.BYTEARRAY:
                // Raw bytes are transported base64-encoded.
                DataByteArray bytes = (DataByteArray) value;
                String raw = Base64.getEncoder().encodeToString(bytes.get());
                g.writeString(raw);
                break;
            case DataType.CHARARRAY:
                g.writeString((String) value);
                break;
            case DataType.BIGINTEGER:
                g.writeNumber((BigInteger) value);
                break;
            case DataType.BIGDECIMAL:
                g.writeNumber((BigDecimal) value);
                break;
            case DataType.MAP:
                g.writeStartObject();
                Map<Object, Object> map = (Map<Object, Object>) value;
                if (shouldCreateTensor(map, name, properties)) {
                    // Map fields configured as tensors get tensor cell syntax.
                    writeTensor(map, g, isRemoveTensor(name, properties));
                } else {
                    for (Map.Entry<Object, Object> entry : map.entrySet()) {
                        String k = entry.getKey().toString();
                        Object v = entry.getValue();
                        Byte t = DataType.findType(v);
                        Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                        writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
                    }
                }
                g.writeEndObject();
                break;
            case DataType.TUPLE:
                Tuple tuple = (Tuple) value;
                if (shouldWriteTupleAsMap(name, properties)) {
                    // Configured tuples become JSON objects keyed by schema field names.
                    Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                    writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
                } else {
                    // Single-element tuples of configured fields are unwrapped
                    // (no surrounding JSON array).
                    boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                    if (writeStartArray) {
                        g.writeStartArray();
                    }
                    for (Object v : tuple) {
                        writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                    }
                    if (writeStartArray) {
                        g.writeEndArray();
                    }
                }
                break;
            case DataType.BAG:
                DataBag bag = (DataBag) value;
                schema = (schema != null) ? schema.getField(0).schema : null;
                if (shouldWriteBagAsMap(name, properties)) {
                    // Bags of (key, value) tuples become JSON objects.
                    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                    g.writeStartObject();
                    for (Tuple element : bag) {
                        if (element.size() != 2) {
                            continue; // expect (key, value) pairs only
                        }
                        String k = (String) element.get(0);
                        Object v = element.get(1);
                        Byte t = DataType.findType(v);
                        if (t == DataType.TUPLE) {
                            Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                            writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
                        } else {
                            writeField(k, v, t, g, properties, valueSchema, op, depth+1);
                        }
                    }
                    g.writeEndObject();
                } else {
                    g.writeStartArray();
                    for (Tuple t : bag) {
                        writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                    }
                    g.writeEndArray();
                }
                break;
        }
    }
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
    // Wraps the value in a partial-update object: {"assign": ...} by default, or
    // the remove/add operation name when the field is configured for one. Note the
    // short-circuit && chain: the first matching isPartialOperation call also
    // writes the operation name as a side effect.
    private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
        g.writeStartObject();
        if (!isPartialOperation(REMOVE_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
                && !isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
                && !isPartialOperation(ADD_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_ADD, true)) {
            g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
        }
        writeValue(value, type, g, name, properties, schema, op, depth);
        g.writeEndObject();
    }
    // Returns true iff 'name' is listed in the comma-separated property 'label'.
    // CAUTION: when writeFieldName is true and the field matches, this also emits
    // targetOperation as a JSON field name — a side effect callers depend on.
    private static boolean isPartialOperation(String label, String name, Properties properties, JsonGenerator g, String targetOperation, boolean writeFieldName) throws IOException{
        boolean isPartialOperation = false;
        if (properties.getProperty(label) != null) {
            String[] p = properties.getProperty(label).split(",");
            if (Arrays.asList(p).contains(name)) {
                if (writeFieldName) {
                    g.writeFieldName(targetOperation);
                }
                isPartialOperation = true;
            }
        }
        return isPartialOperation;
    }
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null){
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if(simpleObjectFields != null){
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(ADD_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties){
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g, Boolean isRemoveTensor) throws IOException {
if (!isRemoveTensor){
g.writeFieldName("cells");
}else{
g.writeFieldName("address");
}
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
if (!isRemoveTensor){
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}else{
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
}
g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label : operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
can we check it's a partial operation first, then generate the field name according to its data type? currently, u specifically check it's a `remove/add _bag_as_map_fields`, then transform the bag to field name directly. It'll be more general to depend on data type than on field name. | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_ASSIGN, false)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth+1);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth+1);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_ASSIGN, false)){ | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private final String template;
private final Operation operation;
private final Properties properties;
public VespaDocumentOperation(String... params) {
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
return null;
}
if (template == null || template.length() == 0) {
warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println(docId);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
return null;
}
} catch (Exception e) {
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(ExceptionUtils.getStackTrace(e));
warn(sb.toString(), PigWarning.UDF_WARNING_1);
return null;
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
@SuppressWarnings("unchecked")
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
writeTensor(map, g, isRemoveTensor(name, properties));
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth+1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
if (!isPartialOperation(REMOVE_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
&& !isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
&& !isPartialOperation(ADD_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_ADD, true)) {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean isPartialOperation(String label, String name, Properties properties, JsonGenerator g, String targetOperation, boolean writeFieldName) throws IOException{
boolean isPartialOperation = false;
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
if (writeFieldName) {
g.writeFieldName(targetOperation);
}
isPartialOperation = true;
}
}
return isPartialOperation;
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null){
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if(simpleObjectFields != null){
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(ADD_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties){
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g, Boolean isRemoveTensor) throws IOException {
if (!isRemoveTensor){
g.writeFieldName("cells");
}else{
g.writeFieldName("address");
}
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
if (!isRemoveTensor){
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}else{
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
}
g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/**
 * The set of Vespa document operations this UDF can emit. The lower-cased enum
 * name doubles as both the JSON field name and the "operation" property value.
 */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lower-case form of the constant name, e.g. UPDATE -> "update". */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses the given text case-insensitively into an operation.
     * @throws IllegalArgumentException when the text names no known operation
     */
    public static Operation fromString(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true when the text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property keys accepted by the UDF constructor (supplied as "key=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Comma-separated field lists controlling how individual fields are rendered.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update verbs.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps a map-field property list to the partial-update verb applied per map entry.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps a tensor/map property list to the partial-update verb used for whole fields.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Document id template (the "docid" property), parsed operation, and raw configuration.
private final String template;
private final Operation operation;
private final Properties properties;
// Pig counter reporter; may be null outside a live Pig task.
private PigStatusReporter statusReporter;
/**
 * Parses the constructor parameters ("key=value" strings) into properties and
 * pre-registers the ok/failed counters so they are visible even when zero.
 * The operation defaults to "put" when not configured.
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one input tuple into a JSON Vespa document operation string.
 * Returns null — bumping the "failed" counter and/or emitting a UDF warning —
 * on empty input, missing configuration, or any processing error, so a single
 * bad row does not fail the whole job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress(); // keep long-running tasks from being killed as idle
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        // Expand the docid template (e.g. "id:ns:type::<field>") with this row's values.
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(getStackTraceAsString(e));
        warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties Configuration controlling how individual fields are rendered
 * @param schema Pig schema describing the input field types
 * @return A valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException ...
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // "create": true makes the update create the document when it does not exist.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition template may reference row fields, like the docid template.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
/**
 * Looks up the partial-update verb configured for a field: returns the verb of
 * the first property in operationMap whose comma-separated field list contains
 * the given name, or null when no configured list mentions the field.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> mapping : operationMap.entrySet()) {
        String fieldList = properties.getProperty(mapping.getKey());
        if (fieldList == null) {
            continue;
        }
        if (Arrays.asList(fieldList.split(",")).contains(name)) {
            return mapping.getValue();
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
/**
 * Emits one partial-update entry per map element using Vespa's "field{key}"
 * fieldpath syntax. For the "remove" verb each entry becomes {"remove": 0};
 * otherwise the entry value is wrapped via writePartialUpdate.
 * Bag elements must be (key, value) 2-tuples; others are skipped silently.
 */
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    // Descend into the bag's tuple schema, then its value column's schema.
    schema = (schema != null) ? schema.getField(0).schema : null;
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                // Vespa's map-entry remove uses {"remove": 0} as the payload.
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
@SuppressWarnings("unchecked")
/**
 * Writes a single Pig value to the JSON generator, dispatching on the Pig
 * DataType. Scalars map to JSON primitives; maps may become tensors (per the
 * *-tensor-fields properties) or JSON objects; tuples become arrays or objects;
 * bags become objects (bag-as-map-fields) or arrays. depth tracks nesting so
 * per-field configuration only applies at the document's top level.
 */
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            // Unknown types are silently dropped.
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Dates are serialized as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded, matching Vespa's raw field encoding.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Tensor-configured fields get the tensor cell/address representation.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // simple-array-fields may suppress the array wrapper for 1-element tuples.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bag of (key, value) 2-tuples rendered as a JSON object; other shapes skipped.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
/**
 * Partial-update ("assign"/"add"/"remove") syntax applies only to top-level
 * document fields (depth 1) of an update operation.
 */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    boolean isUpdate = (op == Operation.UPDATE);
    boolean isTopLevelField = (depth == 1);
    return isUpdate && isTopLevelField;
}
/**
 * Wraps a field value in Vespa partial-update syntax: { "&lt;verb&gt;": &lt;value&gt; }.
 * The verb comes from the configured tensor/map field lists and defaults to
 * "assign" when the field is not listed anywhere.
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String verb = getPartialOperation(partialOperationMap, name, properties);
    g.writeFieldName(verb == null ? PARTIAL_UPDATE_ASSIGN : verb);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * A tuple is rendered inside a JSON array unless it has at most one element and
 * its field name is listed in — or matched by "*" for — simple-array-fields.
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    for (String candidate : simpleArrayFields.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * True when the named tuple field should be rendered as a JSON object (map)
 * rather than an array: the field is listed in — or matched by "*" for —
 * either update-map-fields or simple-object-fields.
 * NOTE(review): the first list intentionally(?) comes from UPDATE_MAP_FIELDS,
 * not a dedicated tuple property — confirm this coupling is wanted.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String updateMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && updateMapFields == null) {
        return false;
    }
    // Check both configured lists in order; either may match by "*" or by name.
    for (String fieldList : new String[] {updateMapFields, simpleObjectFields}) {
        if (fieldList == null) {
            continue;
        }
        if (fieldList.equals("*")) {
            return true;
        }
        for (String candidate : fieldList.split(",")) {
            if (candidate.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when the named bag field is listed in — or matched by "*" for — the
 * bag-as-map-fields property, i.e. should be rendered as a JSON object.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    if (bagAsMapFields.equals("*")) {
        return true;
    }
    String[] listed = bagAsMapFields.split(",");
    for (int i = 0; i < listed.length; i++) {
        if (listed[i].trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * True when the named map field is listed in any of the create/update/remove
 * tensor field properties and should therefore be serialized as a tensor.
 * Note: unlike the other field-list helpers, "*" wildcards are not honored here.
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // The three copy-pasted loops of the original collapse to one pass over the lists.
    String[] tensorFieldLists = {
            properties.getProperty(CREATE_TENSOR_FIELDS),
            properties.getProperty(UPDATE_TENSOR_FIELDS),
            properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String fieldList : tensorFieldLists) {
        if (fieldList == null) {
            continue;
        }
        for (String candidate : fieldList.split(",")) {
            if (candidate.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/** True when the named field appears in the remove-tensor-fields list. */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    for (String candidate : removeTensorFields.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns false only for top-level fields (depth 1) named in the exclude-fields
 * list; every other field is written.
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    for (String candidate : excludeFields.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Writes a Pig map as Vespa tensor cells: "cells": [{"address": {...}, "value": n}, ...].
 * Map keys are cell addresses of the form "dim1:label1,dim2:label2,..."; map
 * values must parse as doubles.
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 * @throws NumberFormatException when a cell value is not numeric
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Empty dimension segments (e.g. trailing commas) are skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Writes a remove-tensor update: "addresses": [{dim: label}, ...], one object
 * per "dim:label" pair found in each comma-separated map key. Map values are
 * ignored (a remove has no cell values).
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Skip blank dimensions BEFORE opening the JSON object; the original called
            // writeStartObject() first, so this continue leaked an unbalanced object
            // and produced invalid JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            g.writeStartObject();
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/**
 * Renders a throwable's stack trace into a string for warning logs.
 * (StringWriter is documented to be unaffected by close(), so no resource
 * management is needed for the buffer itself.)
 */
private static String getStackTraceAsString(Throwable throwable) {
    StringWriter buffer = new StringWriter();
    try (PrintWriter sink = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(sink);
    }
    return buffer.toString();
}
// Emits a Pig UDF warning (aggregated into counters) and mirrors the message to
// stderr so it is also visible in the raw task logs.
private void warnLog(String msg, PigWarning warning) {
    warn(msg, warning);
    System.err.println(msg);
}
} |
per discussion, cannot merge writeMapPartialUpdate into writePartialUpdate method because we have to generate special key name of map partial update fields. | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_ASSIGN, false)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, false)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth+1);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth+1);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_ASSIGN, false)){ | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
// The set of Vespa document operations this UDF can emit; the lower-cased enum
// name doubles as the JSON field name / "operation" property value.
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;
    // Lower-case form, e.g. UPDATE -> "update".
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }
    // Case-insensitive parse; throws IllegalArgumentException for unknown text.
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }
    // True when text names a known operation (case-insensitive).
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property keys accepted by the UDF constructor (supplied as "key=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Comma-separated field lists controlling how individual fields are rendered.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update verbs.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Document id template (the "docid" property), parsed operation, and raw configuration.
private final String template;
private final Operation operation;
private final Properties properties;
// Parses the constructor parameters ("key=value" strings) into properties; the
// operation defaults to "put" when not configured.
public VespaDocumentOperation(String... params) {
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one input tuple into a JSON Vespa document operation string.
 * Returns null (with a UDF warning) on empty input, missing configuration, or
 * any processing error, so a single bad row does not fail the whole job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    if (tuple == null || tuple.size() == 0) {
        return null;
    }
    if (template == null || template.length() == 0) {
        warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress(); // keep long-running tasks from being killed as idle
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        // Expand the docid template (e.g. "id:ns:type::<field>") with this row's values.
        String docId = TupleTools.toString(fields, template);
        // Fix: removed a leftover System.out.println(docId) debug statement that
        // wrote to stdout once per input row.
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
            return null;
        }
    } catch (Exception e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(ExceptionUtils.getStackTrace(e));
        warn(sb.toString(), PigWarning.UDF_WARNING_1);
        return null;
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties Configuration controlling how individual fields are rendered
 * @param schema Pig schema describing the input field types
 * @return A valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException ...
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // "create": true makes the update create the document when it does not exist.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition template may reference row fields, like the docid template.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
/**
 * Writes a single Pig value to the JSON generator, dispatching on the Pig
 * DataType. Scalars map to JSON primitives; maps may become tensors (per the
 * *-tensor-fields properties) or JSON objects; tuples become arrays or objects;
 * bags become objects (bag-as-map-fields) or arrays. depth tracks nesting so
 * per-field configuration only applies at the document's top level.
 * Fix: the original declared @SuppressWarnings("unchecked") twice on this
 * method; a non-repeatable annotation may appear only once, so the duplicate
 * was a compile error.
 */
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            // Unknown types are silently dropped.
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Dates are serialized as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded, matching Vespa's raw field encoding.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Tensor-configured fields get the tensor cell/address representation.
                writeTensor(map, g, isRemoveTensor(name, properties));
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // simple-array-fields may suppress the array wrapper for 1-element tuples.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bag of (key, value) 2-tuples rendered as a JSON object; other shapes skipped.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// Partial-update ("assign"/"add"/"remove") syntax applies only to top-level
// document fields (depth 1) of an update operation.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return op == Operation.UPDATE && depth == 1;
}
/**
 * Wraps a field value in partial-update syntax: { "<verb>": <value> }.
 * CAUTION: the isPartialOperation calls below WRITE the verb field name as a
 * side effect when they match, so both the call order and the short-circuit
 * evaluation of this condition are load-bearing; only when none match is the
 * default "assign" verb written here.
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    if (!isPartialOperation(REMOVE_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
            && !isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties, g, PARTIAL_UPDATE_REMOVE, true)
            && !isPartialOperation(ADD_TENSOR_FIELDS, name, properties, g, PARTIAL_UPDATE_ADD, true)) {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * True when the field name appears in the comma-separated property identified
 * by label. As a side effect, when writeFieldName is set and the field matches,
 * the target partial-update verb is emitted as the next JSON field name.
 */
private static boolean isPartialOperation(String label, String name, Properties properties, JsonGenerator g, String targetOperation, boolean writeFieldName) throws IOException {
    String fieldList = properties.getProperty(label);
    if (fieldList == null) {
        return false;
    }
    if (!Arrays.asList(fieldList.split(",")).contains(name)) {
        return false;
    }
    if (writeFieldName) {
        g.writeFieldName(targetOperation);
    }
    return true;
}
// A tuple is rendered inside a JSON array unless it has at most one element and
// its field name is listed in (or matched by "*" for) simple-array-fields.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    String[] fields = simpleArrayFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// True when the named tuple field should be rendered as a JSON object (map)
// rather than an array: the field is listed in (or matched by "*" for) either
// add-bag-as-map-fields or simple-object-fields.
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    if (addBagAsMapFields != null) {
        if (addBagAsMapFields.equals("*")) {
            return true;
        }
        String[] fields = addBagAsMapFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (simpleObjectFields != null) {
        if (simpleObjectFields.equals("*")) {
            return true;
        }
        String[] fields = simpleObjectFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when the named bag field is listed in (or matched by "*" for) the
// bag-as-map-fields property, i.e. should be rendered as a JSON object.
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    if (bagAsMapFields.equals("*")) {
        return true;
    }
    String[] fields = bagAsMapFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True when the named map field is listed in any of the create/add/remove tensor
// field properties and should therefore be serialized as a tensor.
// Note: unlike the other field-list helpers, "*" wildcards are not honored here.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
    String addTensorFields = properties.getProperty(ADD_TENSOR_FIELDS);
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
        return false;
    }
    String[] fields;
    if (createTensorFields != null) {
        fields = createTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (addTensorFields != null) {
        fields = addTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (removeTensorFields != null) {
        fields = removeTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when the named field appears in the remove-tensor-fields list.
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    String[] fields = removeTensorFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Returns false only for top-level fields (depth 1) named in the exclude-fields
// list; every other field is written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    String[] fields = excludeFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g, Boolean isRemoveTensor) throws IOException {
if (!isRemoveTensor){
g.writeFieldName("cells");
}else{
g.writeFieldName("address");
}
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
if (!isRemoveTensor){
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}else{
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
}
g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label : operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
Suggest to merge this two condition into one method. Like the one, shouldWritePartialUpdate. | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)){ | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> operationMap;
static {
operationMap = new HashMap<>();
operationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(REMOVE_BAG_AS_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(ADD_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if(statusReporter != null){
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",0);
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",1);
}
e.printStackTrace();
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println(docId);
/*
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(ExceptionUtils.getStackTrace(e));
warn(sb.toString(), PigWarning.UDF_WARNING_1);
*/
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",1);
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
@SuppressWarnings("unchecked")
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if(isRemoveTensor(name,properties)){
writeRemoveTensor(map,g);
}else{
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth+1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
boolean isAssign = true;
g.writeStartObject();
for (String label: operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
g.writeFieldName(operationMap.get(label));
isAssign = false;
}
}
}
if (isAssign) {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean isPartialOperation(String label, String name, Properties properties) {
boolean isPartialOperation = false;
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
isPartialOperation = true;
}
}
return isPartialOperation;
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null){
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if(simpleObjectFields != null){
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    // Bags render as arrays by default; bag-as-map-fields ("*" or a CSV of names)
    // switches the named field to object form.
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    return bagAsMapFields.equals("*")
            || Arrays.stream(bagAsMapFields.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    // Decides from configuration alone whether this map field is emitted in tensor
    // form; the 'map' argument is unused but kept for signature compatibility.
    if (properties == null) {
        return false;
    }
    for (String property : new String[] {CREATE_TENSOR_FIELDS, ADD_TENSOR_FIELDS, REMOVE_TENSOR_FIELDS}) {
        String configured = properties.getProperty(property);
        if (configured != null
                && Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name))) {
            return true;
        }
    }
    return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
    // True when the field is listed under remove-tensor-fields (trimmed, case-insensitive).
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    return removeTensorFields != null
            && Arrays.stream(removeTensorFields.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    // exclude-fields only suppresses top-level document fields (depth == 1).
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    return Arrays.stream(excludeFields.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    // Emits {"cells":[{"address":{dim:label,...},"value":n}, ...]} from map entries
    // whose keys look like "dim:label,dim:label" and whose values parse as doubles.
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> cell : map.entrySet()) {
        double cellValue = Double.parseDouble(cell.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        for (String dimension : cell.getKey().toString().split(",")) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // tolerate stray commas in the address string
            }
            String[] address = dimension.split(":");
            if (address.length != 2 || address[0].isEmpty() || address[1].isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(address[0].trim());
            g.writeString(address[1].trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(cellValue);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Emits {"addresses":[{dim:label},{dim:label},...]} — one JSON object per
 * "dim:label" pair found in the map keys (keys are "dim:label,dim:label" strings;
 * map values are ignored here).
 *
 * @throws IllegalArgumentException if a non-empty dimension is not exactly "dim:label"
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                // BUG FIX: skip BEFORE opening the object; the old code called
                // writeStartObject() first, so an empty dimension left a dangling
                // unclosed object and produced malformed JSON.
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** The lower-case form is what appears in configuration values and JSON output. */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses a configuration value into an Operation (case-insensitive).
     *
     * @throws IllegalArgumentException if the text names no known operation
     */
    public static Operation fromString(String text) {
        return Arrays.stream(values())
                .filter(op -> op.toString().equalsIgnoreCase(text))
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("Unknown operation: " + text));
    }

    /** Returns whether the text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        return Arrays.stream(values()).anyMatch(op -> op.toString().equalsIgnoreCase(text));
    }
}
// Configuration property names, passed to the UDF constructor as "name=value" strings.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Field-list properties: each holds "*" or a comma-separated list of field names.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update operation names used as JSON field names.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps the map-field properties to the partial-update operation they imply.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps every field-list property to its partial-update operation (tensor and map fields).
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Document id template ('docid' property), e.g. "id:ns:type::<field>".
private final String template;
// Operation to emit; defaults to PUT (see constructor).
private final Operation operation;
private final Properties properties;
// May be null outside a running Pig task (e.g. in unit tests).
private PigStatusReporter statusReporter;
// Pig invokes this with the DEFINE parameters, e.g.
// DEFINE ... VespaDocumentOperation('docid=...', 'operation=put').
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        // Pre-register both counters at zero so they always appear in job stats.
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one input tuple into a JSON document operation string. Returns null
 * (after counting and warning) for rows that cannot be converted, so a bad record
 * is skipped rather than failing the whole job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    // Empty row: count the failure, nothing to emit.
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    // A document id template ('docid' property) is mandatory.
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // heartbeat so long-running tasks are not presumed hung
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
            return null;
        }
    } catch (Exception e) {
        // Any conversion failure: count it, report row + stack trace, skip the row.
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(getStackTraceAsString(e));
        warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties UDF configuration controlling how fields are rendered
 * @param schema Pig schema describing the field types; may be null
 * @return A valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException ...
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    // Outermost form: {"<op>": "<docId>", ...}
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // Adds "create": true to the update operation when configured.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition template is expanded against the row's fields, like the docid template.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
/**
 * Returns the partial-update operation (e.g. "assign", "remove") configured for the
 * given field name, or null when no property in operationMap lists the field.
 *
 * @param operationMap maps a property name to the partial-update operation it implies
 * @param name field name, matched exactly against the comma-separated property value
 * @param properties UDF configuration
 * @return the configured operation, or null if none applies
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    // Iterate entries directly instead of keySet()+get(): one map lookup per mapping.
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured != null && Arrays.asList(configured.split(",")).contains(name)) {
            return entry.getValue();
        }
    }
    return null;
}
/**
 * Handles map-valued fields configured under remove-map-fields / update-map-fields:
 * each bag element is a (key, value) tuple, emitted as field name "name{key}" with
 * either a remove marker or a partial-update wrapper around the value.
 */
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    // Descend into the bag's tuple schema; element 0 is the key, element 1 the value.
    schema = (schema != null) ? schema.getField(0).schema : null;
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;  // not a (key, value) pair: skip rather than fail the document
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                // Emit {"remove": 0} to delete this map entry.
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
/**
 * Writes a single Pig value as JSON. Scalars map to JSON scalars; maps to objects
 * (or tensor cell/address form when configured); tuples to arrays or objects; bags
 * to arrays or objects, all driven by the configured field lists.
 */
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            break;  // nothing sensible to write
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Serialized as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded into a JSON string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Field is configured as a tensor: emit address form instead of a plain object.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                // Render the tuple as an object keyed by its schema's field names.
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // simple-array-fields can suppress the array wrapper for 1-element tuples.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bag of (key, value) tuples rendered as one JSON object.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;  // not a key/value pair: skip
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// Partial-update ({"assign": ...} etc.) syntax applies only to top-level fields
// (depth == 1) of an update operation.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    // Wraps the value as {"<operation>": value}; "assign" is the default when the
    // field has no explicit partial-update operation configured.
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    g.writeFieldName(operation == null ? PARTIAL_UPDATE_ASSIGN : operation);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    // Multi-element tuples always get a JSON array wrapper; so does everything else
    // unless the field is configured under simple-array-fields.
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    // Comma-separated field names, matched case-insensitively after trimming.
    return Arrays.stream(simpleArrayFields.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    // A tuple is serialized as a JSON object when the field is listed — or "*" is
    // given — under update-map-fields or simple-object-fields.
    if (properties == null) {
        return false;
    }
    for (String property : new String[] {UPDATE_MAP_FIELDS, SIMPLE_OBJECT_FIELDS}) {
        String configured = properties.getProperty(property);
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")
                || Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name))) {
            return true;
        }
    }
    return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    // Bags render as arrays by default; bag-as-map-fields ("*" or a CSV of names)
    // switches the named field to object form.
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    return bagAsMapFields.equals("*")
            || Arrays.stream(bagAsMapFields.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    // Decides from configuration alone whether this map field is emitted in tensor
    // form; the 'map' argument is unused but kept for signature compatibility.
    if (properties == null) {
        return false;
    }
    for (String property : new String[] {CREATE_TENSOR_FIELDS, UPDATE_TENSOR_FIELDS, REMOVE_TENSOR_FIELDS}) {
        String configured = properties.getProperty(property);
        if (configured != null
                && Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name))) {
            return true;
        }
    }
    return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
    // True when the field is listed under remove-tensor-fields (trimmed, case-insensitive).
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    return removeTensorFields != null
            && Arrays.stream(removeTensorFields.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    // exclude-fields only suppresses top-level document fields (depth == 1).
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    return Arrays.stream(excludeFields.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    // Emits {"cells":[{"address":{dim:label,...},"value":n}, ...]} from map entries
    // whose keys look like "dim:label,dim:label" and whose values parse as doubles.
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> cell : map.entrySet()) {
        double cellValue = Double.parseDouble(cell.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        for (String dimension : cell.getKey().toString().split(",")) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // tolerate stray commas in the address string
            }
            String[] address = dimension.split(":");
            if (address.length != 2 || address[0].isEmpty() || address[1].isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(address[0].trim());
            g.writeString(address[1].trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(cellValue);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Emits {"addresses":[{dim:label},{dim:label},...]} — one JSON object per
 * "dim:label" pair found in the map keys (keys are "dim:label,dim:label" strings;
 * map values are ignored here).
 *
 * @throws IllegalArgumentException if a non-empty dimension is not exactly "dim:label"
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                // BUG FIX: skip BEFORE opening the object; the old code called
                // writeStartObject() first, so an empty dimension left a dangling
                // unclosed object and produced malformed JSON.
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
    // StringWriter.close() declares IOException, hence the try-with-resources and
    // the UncheckedIOException translation.
    try (StringWriter writer = new StringWriter();
         PrintWriter printer = new PrintWriter(writer, true)) {
        throwable.printStackTrace(printer);
        return writer.toString();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Routes a warning both through Pig's aggregated warn() channel and to stderr,
// so the message also shows up directly in the task's log.
private void warnLog(String msg, PigWarning warning) {
    warn(msg, warning);
    System.err.println(msg);
}
} |
Extract lines 228–257 into a new method. This could increase the readability of the writeField method. | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | schema = (schema != null) ? schema.getField(0).schema : null; | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** The lower-case form is what appears in configuration values and JSON output. */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses a configuration value into an Operation (case-insensitive).
     *
     * @throws IllegalArgumentException if the text names no known operation
     */
    public static Operation fromString(String text) {
        return Arrays.stream(values())
                .filter(op -> op.toString().equalsIgnoreCase(text))
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("Unknown operation: " + text));
    }

    /** Returns whether the text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        return Arrays.stream(values()).anyMatch(op -> op.toString().equalsIgnoreCase(text));
    }
}
// Configuration property names, passed to the UDF constructor as "name=value" strings.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Field-list properties: each holds "*" or a comma-separated list of field names.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update operation names used as JSON field names.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps each field-list property to the partial-update operation it implies.
private static Map<String, String> operationMap;
static {
    operationMap = new HashMap<>();
    operationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    operationMap.put(REMOVE_BAG_AS_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    operationMap.put(ADD_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
}
// Document id template ('docid' property).
private final String template;
// Operation to emit; defaults to PUT (see constructor).
private final Operation operation;
private final Properties properties;
// May be null outside a running Pig task (e.g. in unit tests).
private PigStatusReporter statusReporter;
// Pig invokes this with the DEFINE parameters, e.g.
// DEFINE ... VespaDocumentOperation('docid=...', 'operation=put').
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if(statusReporter != null){
        // Pre-register both counters at zero so they always appear in job stats.
        statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",0);
        statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one input tuple into a JSON document operation string. Returns null
 * (after counting and warning) for rows that cannot be converted, so a bad record
 * is skipped rather than failing the whole job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    // Empty row: count the failure, nothing to emit.
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    // A document id template ('docid' property) is mandatory.
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // heartbeat so long-running tasks are not presumed hung
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // BUG FIX: the old handler used e.printStackTrace()/System.out.println and
        // re-ran getInputSchema()/tupleMap()/toString() inside the catch block — if
        // that work threw the original exception, repeating it here would throw again
        // and escape the handler. Report through Pig's warning channel instead.
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ").append(e);
        for (StackTraceElement frame : e.getStackTrace()) {
            sb.append("\n\tat ").append(frame);
        }
        warn(sb.toString(), PigWarning.UDF_WARNING_1);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties UDF configuration controlling how fields are rendered
 * @param schema Pig schema describing the field types; may be null
 * @return A valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException ...
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    // Outermost form: {"<op>": "<docId>", ...}
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // Adds "create": true to the update operation when configured.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition template is expanded against the row's fields, like the docid template.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
/**
 * Writes a single Pig value as JSON. Scalars map to JSON scalars; maps to objects
 * (or tensor cell/address form when configured); tuples to arrays or objects; bags
 * to arrays or objects, all driven by the configured field lists.
 */
@SuppressWarnings("unchecked")  // BUG FIX: the annotation was declared twice in a row, which does not compile
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            break;  // nothing sensible to write
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Serialized as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded into a JSON string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Field is configured as a tensor: emit address form instead of a plain object.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                // Render the tuple as an object keyed by its schema's field names.
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // simple-array-fields can suppress the array wrapper for 1-element tuples.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bag of (key, value) tuples rendered as one JSON object.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;  // not a key/value pair: skip
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// True when a field-level partial-update wrapper ("assign"/"add"/"remove")
// must be emitted: only for UPDATE operations, and only for top-level
// document fields (depth == 1, i.e. direct children of the "fields" object).
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return op == Operation.UPDATE && depth == 1;
}
/**
 * Wraps {@code value} in a Vespa partial-update object, e.g. {@code {"assign": ...}}.
 * Every configured partial-operation label in {@code operationMap} that lists
 * {@code name} contributes its verb as a field name (matching the original
 * behavior when several labels match); when none match, the default
 * "assign" verb is written.
 *
 * @param value      the field value to wrap
 * @param type       Pig DataType of {@code value}
 * @param g          generator positioned where the wrapper object belongs
 * @param name       document field name being updated
 * @param properties UDF configuration (partial-operation field lists)
 * @param schema     Pig schema for the value, may be null
 * @throws IOException if JSON generation fails
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    boolean isAssign = true;
    g.writeStartObject();
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        // Reuse the shared membership test instead of re-parsing the
        // comma-separated property here (the original duplicated that logic).
        if (isPartialOperation(entry.getKey(), name, properties)) {
            g.writeFieldName(entry.getValue());
            isAssign = false;
        }
    }
    if (isAssign) {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * Returns true when field {@code name} appears (exact, case-sensitive match)
 * in the comma-separated value of property {@code label}.
 */
private static boolean isPartialOperation(String label, String name, Properties properties) {
    String configured = properties.getProperty(label);
    if (configured == null) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.equals(name)) {
            return true;
        }
    }
    return false;
}
/**
 * Decides whether a tuple is serialized with surrounding JSON array brackets.
 * Single-value tuples whose field is listed under "simple-array-fields"
 * (or all fields, when that property is "*") are written without brackets.
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if (configured.equals("*")) {
        return false;
    }
    boolean listed = false;
    for (String field : configured.split(",")) {
        listed = listed || field.trim().equalsIgnoreCase(name);
    }
    return !listed;
}
/**
 * True when a tuple field should be serialized as a JSON object (map):
 * the field is listed under "add-bag-as-map-fields" or
 * "simple-object-fields", either explicitly or via the "*" wildcard.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // Checked in the same order as before: bag-as-map first, then simple-object.
    String[] candidates = {
            properties.getProperty(ADD_BAG_AS_MAP_FIELDS),
            properties.getProperty(SIMPLE_OBJECT_FIELDS)
    };
    for (String configured : candidates) {
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")) {
            return true;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when a bag of {key, value} tuples should be serialized as a JSON
 * object: the field is listed under "bag-as-map-fields" or that property
 * is the "*" wildcard.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    boolean listed = false;
    for (String field : configured.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            listed = true;
            break;
        }
    }
    return listed;
}
/**
 * True when a map field must be serialized as a Vespa tensor: the field is
 * listed under any of the create/add/remove tensor field properties.
 * Note: unlike the bag/map predicates, these properties take no "*" wildcard.
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorProperties = {
            properties.getProperty(CREATE_TENSOR_FIELDS),
            properties.getProperty(ADD_TENSOR_FIELDS),
            properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String configured : tensorProperties) {
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when the field is configured under "remove-tensor-fields", i.e. its
 * map value represents tensor cell addresses to remove rather than cells to set.
 */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String field : configured.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * True unless the field is excluded via "exclude-fields". Exclusion only
 * applies to top-level document fields (depth == 1); nested fields are
 * always written.
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String field : excluded.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Serializes a map of cell-address strings to numeric values as a Vespa
 * tensor literal: {"cells":[{"address":{dim:label,...},"value":n},...]}.
 * Each map key is a comma-separated list of "dimension:label" pairs.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 * @throws NumberFormatException if a map value does not parse as a double
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Empty segments (e.g. trailing commas) are tolerated and skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Serializes tensor cell addresses to remove as
 * {"addresses":[{dim:label,...},...]}. Each map key is a comma-separated
 * list of "dimension:label" pairs; map values are ignored.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Skip empty segments BEFORE opening the JSON object. The
            // original called writeStartObject() first and then 'continue'd,
            // leaving an unclosed object and producing malformed JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/** Vespa document operation types; rendered in lowercase to match the JSON field names. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lowercase constant name, as used in the document-operation JSON. */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses {@code text} case-insensitively into an Operation.
     *
     * @throws IllegalArgumentException when {@code text} names no operation
     */
    public static Operation fromString(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** True when {@code text} names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        try {
            fromString(text);
            return true;
        } catch (IllegalArgumentException e) {
            return false;
        }
    }
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
/**
 * Configures the UDF from "key=value" parameter strings parsed by
 * VespaConfiguration.loadProperties (doc id template, operation, field options).
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();  // may be null outside a running Pig task
    if (statusReporter != null) {
        // Pre-register both counters at zero so they always appear in job stats.
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Pig entry point: converts one input tuple into a Vespa document-operation
 * JSON string. Returns null (after bumping the failure counter and warning)
 * when the row cannot be processed, which makes Pig skip the record.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    // Nothing to serialize.
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    // A document id template is mandatory; without it no doc id can be built.
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // keep the task alive while processing slow rows
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // Surface the offending row and full stack trace through Pig's warning channel.
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(getStackTraceAsString(e));
        warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties Configuration controlling serialization (tensors, partial updates, ...)
 * @param schema     Pig schema for the fields, may be null
 * @return A valid JSON Vespa document operation, or null when op/docId/fields are empty
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode them as UTF-8. The previous
    // out.toString() used the platform default charset, which mangles
    // non-ASCII doc ids and field values on non-UTF-8 platforms.
    // (UnsupportedEncodingException is an IOException, already declared.)
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update verb mapped to the first label in
 * {@code operationMap} whose comma-separated property value lists
 * {@code name} (exact match), or null when no label matches.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured == null) {
            continue;
        }
        if (Arrays.asList(configured.split(",")).contains(name)) {
            return entry.getValue();
        }
    }
    return null;
}
/**
 * Emits map-style partial updates for a bag of {key, value} tuples, writing
 * one field per entry using Vespa's "field{key}" addressing. For a remove
 * operation each entry becomes a literal {"remove": 0}; otherwise the entry
 * value is written as a partial update.
 * NOTE(review): entries whose value is not a tuple are skipped silently —
 * confirm that is intended.
 */
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    schema = (schema != null) ? schema.getField(0).schema : null;  // the bag's tuple schema
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;  // only {key, value} pairs are addressable
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                // Vespa's map-entry removal syntax: {"remove": 0}.
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
/**
 * Serializes a single Pig value to JSON according to its DataType. Maps may
 * become tensor literals, tuples may be flattened to maps or arrays, and
 * bags may become objects or arrays, all driven by the configured properties.
 * {@code depth} tracks nesting below the document's "fields" object.
 */
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            // Unknown types are silently dropped: field name was already
            // written by the caller, so this produces no value for it.
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Binary payloads are Base64-encoded into a JSON string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Tensor fields: keys are cell addresses, values are cell values.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                // Re-dispatch the tuple through the MAP path at the same depth.
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // Single-value "simple array" tuples may omit the array brackets.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;  // the bag's tuple schema
            if (shouldWriteBagAsMap(name, properties)) {
                // Bag of {key, value} tuples rendered as a JSON object.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;  // only {key, value} pairs can become object members
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
/**
 * Partial-update wrappers are only emitted for top-level fields (depth 1)
 * of an UPDATE operation.
 */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    if (op != Operation.UPDATE) {
        return false;
    }
    return depth == 1;
}
/**
 * Wraps {@code value} in a partial-update object, e.g. {"assign": ...}.
 * The verb is looked up via partialOperationMap when the field is configured
 * for remove/add/assign; otherwise it defaults to "assign".
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    if (operation != null) {
        g.writeFieldName(operation);
    } else {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * Decides whether a tuple is serialized with surrounding JSON array
 * brackets. Single-value tuples listed under "simple-array-fields" (or all
 * of them when the property is "*") skip the brackets.
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    String[] fields = simpleArrayFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * True when a tuple field should be serialized as a JSON object (map):
 * the field is listed (or "*" is given) under "update-map-fields" or
 * "simple-object-fields".
 * NOTE(review): the local is named addBagAsMapFields but reads
 * UPDATE_MAP_FIELDS — confirm the property choice is intended.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    if (addBagAsMapFields != null) {
        if (addBagAsMapFields.equals("*")) {
            return true;
        }
        String[] fields = addBagAsMapFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (simpleObjectFields != null) {
        if (simpleObjectFields.equals("*")) {
            return true;
        }
        String[] fields = simpleObjectFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when a bag of {key, value} tuples should be serialized as a JSON
 * object: the field is listed under "bag-as-map-fields" or the property
 * is the "*" wildcard.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    if (bagAsMapFields.equals("*")) {
        return true;
    }
    String[] fields = bagAsMapFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * True when a map field must be serialized as a Vespa tensor: the field is
 * listed under the create/update/remove tensor field properties.
 * Note: these properties take no "*" wildcard, unlike the bag/map predicates.
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
    String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
        return false;
    }
    String[] fields;
    if (createTensorFields != null) {
        fields = createTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (addTensorFields != null) {
        fields = addTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (removeTensorFields != null) {
        fields = removeTensorFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when the field is configured under "remove-tensor-fields", i.e. its
 * map value holds tensor cell addresses to remove rather than cells to set.
 */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    String[] fields = removeTensorFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * True unless the field is excluded via "exclude-fields". Exclusion only
 * applies to top-level document fields (depth == 1); nested fields are
 * always written.
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    String[] fields = excludeFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Serializes a map of cell-address strings to numeric values as a Vespa
 * tensor literal: {"cells":[{"address":{dim:label,...},"value":n},...]}.
 * Each map key is a comma-separated list of "dimension:label" pairs.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 * @throws NumberFormatException if a map value does not parse as a double
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Empty segments (e.g. trailing commas) are tolerated and skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Serializes tensor cell addresses to remove as
 * {"addresses":[{dim:label,...},...]}. Each map key is a comma-separated
 * list of "dimension:label" pairs; map values are ignored.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Skip empty segments BEFORE opening the JSON object. The
            // original called writeStartObject() first and then 'continue'd,
            // leaving an unclosed object and producing malformed JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/** Renders a throwable's full stack trace into a string. */
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
         PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        // StringWriter.close() is a no-op, but the signature forces handling.
        throw new UncheckedIOException(e);
    }
}
// Emits the message through Pig's aggregated-warning channel AND to stderr,
// so it is visible both in job counters and in raw task logs.
private void warnLog(String msg, PigWarning warning) {
    warn(msg, warning);
    System.err.println(msg);
}
} |
Remove shouldWritePartialUpdate if-else because else won't be happen here. | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | } else { | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/** Vespa document operation types; rendered in lowercase to match the JSON field names. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lowercase constant name, as used in the document-operation JSON. */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses {@code text} case-insensitively into an Operation.
     *
     * @throws IllegalArgumentException when {@code text} names no operation
     */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** True when {@code text} names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> operationMap;
static {
operationMap = new HashMap<>();
operationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(REMOVE_BAG_AS_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(ADD_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
/**
 * Configures the UDF from "key=value" parameter strings parsed by
 * VespaConfiguration.loadProperties (doc id template, operation, field options).
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();  // may be null outside a running Pig task
    if(statusReporter != null){
        // Pre-register both counters at zero so they always appear in job stats.
        statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",0);
        statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Pig entry point: converts one input tuple into a Vespa document-operation
 * JSON string. Returns null (after bumping the failure counter and warning)
 * when the row cannot be processed, which makes Pig skip the record.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    // Nothing to serialize.
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    // A document id template is mandatory; without it no doc id can be built.
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // keep the task alive while processing slow rows
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // Report through Pig's warning channel instead of the previous
        // printStackTrace()/System.out debug prints. Do NOT re-derive the
        // doc id here: getInputSchema()/tupleMap() can themselves throw,
        // letting a second exception escape the handler.
        java.io.StringWriter stackTrace = new java.io.StringWriter();
        e.printStackTrace(new java.io.PrintWriter(stackTrace, true));
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(stackTrace.toString());
        warn(sb.toString(), PigWarning.UDF_WARNING_1);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties Configuration controlling serialization (tensors, partial updates, ...)
 * @param schema     Pig schema for the fields, may be null
 * @return A valid JSON Vespa document operation, or null when op/docId/fields are empty
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode them as UTF-8. The previous
    // out.toString() used the platform default charset, which mangles
    // non-ASCII doc ids and field values on non-UTF-8 platforms.
    // (UnsupportedEncodingException is an IOException, already declared.)
    return out.toString("UTF-8");
}
/**
 * Serializes a single value to JSON according to its Pig {@link DataType}.
 * Maps may be emitted as Vespa tensors when configured; tuples may be emitted
 * as arrays or objects; bags may be emitted as arrays or maps, all driven by
 * the supplied properties. Unknown types are silently skipped.
 *
 * Note: the original code carried the same @SuppressWarnings annotation twice,
 * which is a compile error (duplicate non-repeatable annotation); one copy removed.
 */
@SuppressWarnings("unchecked") // Pig hands map/bag payloads over as raw collections
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Emitted as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are transported as a base64 string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Tensor fields are rendered either as "addresses" (removal) or "cells".
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // Single-element tuples of configured fields are unwrapped (no array brackets).
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // A bag of (key, value) tuples becomes a JSON object; entries of
                // any other arity are silently skipped.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
/** Partial-update wrapping applies only to top-level fields (depth 1) of an update operation. */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    if (op != Operation.UPDATE) {
        return false;
    }
    return depth == 1;
}
/**
 * Wraps {@code value} in a partial-update expression, e.g. {"assign": ...},
 * {"add": ...} or {"remove": ...}. The update verb comes from the first entry
 * in {@code operationMap} whose configured property list contains {@code name};
 * when no property matches, the default "assign" verb is used.
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    boolean isAssign = true;
    g.writeStartObject();
    for (String label : operationMap.keySet()) {
        if (properties.getProperty(label) != null) {
            String[] p = properties.getProperty(label).split(",");
            if (Arrays.asList(p).contains(name)) {
                g.writeFieldName(operationMap.get(label));
                isAssign = false;
                // Stop at the first match: without this break, a field listed under
                // several partial-operation properties wrote multiple consecutive
                // field names before a single value, producing invalid JSON.
                break;
            }
        }
    }
    if (isAssign) {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/** Returns true if property {@code label} is set and its comma-separated list contains {@code name} (exact match). */
private static boolean isPartialOperation(String label, String name, Properties properties) {
    String configured = properties.getProperty(label);
    if (configured == null) {
        return false;
    }
    return Arrays.asList(configured.split(",")).contains(name);
}
/**
 * Decides whether array brackets should be written around a tuple. Brackets are
 * omitted only for single-element tuples of fields configured as "simple arrays"
 * ("*" matches every field; matching is case-insensitive and trims whitespace).
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    return Arrays.stream(simpleArrayFields.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Returns true when the named field is configured (via the add-bag-as-map or
 * simple-object property lists) to have its tuples rendered as JSON objects.
 * "*" matches every field; matching is case-insensitive and trims whitespace.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] candidates = {
            properties.getProperty(ADD_BAG_AS_MAP_FIELDS),
            properties.getProperty(SIMPLE_OBJECT_FIELDS)
    };
    for (String configured : candidates) {
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")) {
            return true;
        }
        if (Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name))) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true when the named field is configured to have its bag of
 * (key, value) tuples rendered as a JSON object rather than an array.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    return configured.equals("*")
            || Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
/**
 * Returns true when the named map field should be serialized as a Vespa tensor,
 * i.e. it appears in any of the create/add/remove tensor property lists.
 * (The map argument itself is not inspected; it is kept for interface stability.)
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorFieldLists = {
            properties.getProperty(CREATE_TENSOR_FIELDS),
            properties.getProperty(ADD_TENSOR_FIELDS),
            properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String configured : tensorFieldLists) {
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/** Returns true when the named field is listed in the remove-tensor property. */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    return Arrays.stream(configured.split(",")).anyMatch(f -> f.trim().equalsIgnoreCase(name));
}
/**
 * Returns false only for top-level fields (depth 1) that appear in the
 * exclude-fields property; everything else is written.
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    return Arrays.stream(excluded.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Writes a map of cell entries as a Vespa tensor "cells" array. Each key is a
 * comma-separated list of "dimension:label" pairs (e.g. "x:0,y:1"), each value
 * must parse as a double.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> cell : map.entrySet()) {
        String cellAddress = cell.getKey().toString();
        Double cellValue = Double.parseDouble(cell.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        for (String dimension : cellAddress.split(",")) {
            // Empty segments (e.g. from a trailing comma) are skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] pair = dimension.split(":");
            if (pair.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = pair[0];
            String label = pair[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(cellValue);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Writes a map of cell addresses as a Vespa tensor "addresses" array, used for
 * removing cells. Keys are comma-separated "dimension:label" pairs; values are ignored.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Validate BEFORE opening the JSON object. The original called
            // writeStartObject() first and then 'continue'd on empty dimensions,
            // leaving an unclosed object and producing invalid JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties Configuration properties controlling how fields are written
 * @param schema Pig schema describing the fields, or null when unknown
 * @return A valid JSON Vespa document operation, or null if any required input is missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes (JsonEncoding.UTF8 above); decode with the
    // same charset. The previous out.toString() used the platform default charset
    // and would mis-decode non-ASCII document content on non-UTF-8 platforms.
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update verb (e.g. "assign", "add", "remove") mapped to the
 * first configured property whose comma-separated field list contains {@code name}
 * (exact match, no trimming), or null when no property matches.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> candidate : operationMap.entrySet()) {
        String configured = properties.getProperty(candidate.getKey());
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.equals(name)) {
                return candidate.getValue();
            }
        }
    }
    return null;
}
// Writes per-entry partial updates for a map-typed field backed by a bag of
// (key, value) tuples. Each entry is addressed with Vespa's fieldname{key}
// syntax; a "remove" operation writes {"remove": 0} for the entry, otherwise
// the entry value is wrapped in a regular partial-update expression.
// NOTE(review): entries whose value is not itself a tuple, or whose arity is
// not exactly 2, are silently skipped — presumably intentional, but confirm.
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
// Descend into the bag's tuple schema, then the tuple's value schema (field 1).
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
// Address the single map entry: e.g. "myMap{someKey}".
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
// Vespa's map-entry removal syntax: the value is irrelevant, 0 is a placeholder.
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
// Partial-update wrapping applies only to top-level document fields (depth 1)
// of an "update" operation; nested values are written as plain JSON.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
// Wraps the value in a partial-update expression. The update verb is resolved
// from the configured partial-operation properties (first match wins); when no
// property lists this field, the default "assign" verb is used — i.e. a
// whole-value replacement as part of a whole-document partial update.
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Writes a map of cell addresses as a Vespa tensor "addresses" array, used for
 * removing cells. Keys are comma-separated "dimension:label" pairs; values are ignored.
 *
 * @throws IllegalArgumentException on a malformed "dimension:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Validate BEFORE opening the JSON object. The original called
            // writeStartObject() first and then 'continue'd on empty dimensions,
            // leaving an unclosed object and producing invalid JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/**
 * Renders a throwable's full stack trace to a string, for inclusion in
 * warning messages. (Closing a StringWriter cannot actually fail, so the
 * IOException branch is effectively unreachable but kept for the compiler.)
 */
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
         PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.getBuffer().toString();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Emits a warning both through Pig's aggregated warn() channel (counted per
// PigWarning category) and to stderr so it is visible in the raw task logs.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
Suggest changing the condition under which the `assign` operation is written. There are two types of partial update: (1) single-field partial update and (2) whole-document partial update. The first is decided by a property; the second is decided by the operation name and the depth. It is better to base this logic on the operation name and the depth, so that it is clear this `assign` is added because of a whole-document partial update. | private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
boolean isAssign = true;
g.writeStartObject();
for (String label: operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
g.writeFieldName(operationMap.get(label));
isAssign = false;
}
}
}
if (isAssign) {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
} | } | private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> operationMap;
static {
operationMap = new HashMap<>();
operationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(REMOVE_BAG_AS_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
operationMap.put(ADD_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if(statusReporter != null){
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",0);
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Evaluates one input tuple and returns the corresponding Vespa document
 * operation as a JSON string, or null when the tuple cannot be processed
 * (failures are counted via the Pig status reporter and warned, never thrown).
 */
@Override
public String exec(Tuple tuple) throws IOException {
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // Report through Pig's warning channel instead of the previous debug
        // leftovers (printStackTrace / System.out.println). Crucially, do NOT
        // recompute schema/fields/docId here as before: that work can itself
        // throw inside the catch block and escape this method.
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(e.toString());
        warn(sb.toString(), PigWarning.UDF_WARNING_1);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties Configuration properties controlling how fields are written
 * @param schema Pig schema describing the fields, or null when unknown
 * @return A valid JSON Vespa document operation, or null if any required input is missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes (JsonEncoding.UTF8 above); decode with the
    // same charset. The previous out.toString() used the platform default charset
    // and would mis-decode non-ASCII document content on non-UTF-8 platforms.
    return out.toString("UTF-8");
}
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties) ||
isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)){
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
} else {
writeValue(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if(isRemoveTensor(name,properties)){
writeRemoveTensor(map,g);
}else{
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth+1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static boolean isPartialOperation(String label, String name, Properties properties) {
boolean isPartialOperation = false;
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
isPartialOperation = true;
}
}
return isPartialOperation;
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null){
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if(simpleObjectFields != null){
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(ADD_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties){
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label : operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
coding style | private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if(isRemoveTensor(name,properties)){
writeRemoveTensor(map,g);
}else{
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth+1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
} | writeRemoveTensor(map,g); | private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/** Vespa document operation types supported by this UDF. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lower-case name, matching the field names used in Vespa document JSON. */
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /**
     * Parses a case-insensitive operation name.
     *
     * @throws IllegalArgumentException when {@code text} names no known operation
     */
    public static Operation fromString(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true when {@code text} (case-insensitive) names a known operation. */
    public static boolean valid(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property names accepted by the UDF constructor (passed as "name=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Field-list properties: comma-separated field names (or "*") selecting an
// alternate JSON serialization for the named fields.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String ADD_TENSOR_FIELDS = "add-tensor-fields";
private static final String REMOVE_BAG_AS_MAP_FIELDS = "remove-bag-as-map-fields";
private static final String ADD_BAG_AS_MAP_FIELDS = "add-bag-as-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// JSON keys of Vespa's partial-update syntax.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps a field-list property to the partial-update verb it triggers.
private static Map<String, String> operationMap;
static {
    operationMap = new HashMap<>();
    operationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    operationMap.put(REMOVE_BAG_AS_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    operationMap.put(ADD_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
}
// Document id template (resolved per row against the tuple's fields) and the
// operation parsed from the UDF arguments.
private final String template;
private final Operation operation;
private final Properties properties;
// Pig counter reporter; may be null outside a real Pig job (e.g. unit tests).
private PigStatusReporter statusReporter;

/**
 * @param params "name=value" property strings; "docid" (required) is the id
 *               template and "operation" (default "put") selects the operation.
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        // Register both counters up front so they appear even for all-ok runs.
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one Pig tuple into a JSON Vespa document operation.
 * Returns null (and bumps the "failed" counter) when the row cannot be
 * converted; failures never propagate, so a bad row does not kill the job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // keep long-running tasks from being timed out
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // FIX: the previous catch block printed the stack trace to stdout,
        // re-ran the failing tuple conversion (which could throw again inside
        // the catch), and left the proper warning commented out. Report the
        // offending row and full trace through Pig's warning mechanism instead.
        java.io.StringWriter trace = new java.io.StringWriter();
        e.printStackTrace(new java.io.PrintWriter(trace, true));
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(trace);
        warn(sb.toString(), PigWarning.UDF_WARNING_1);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update); null yields null
 * @param docId      Document id; null/empty yields null
 * @param fields     Fields to put in document operation; empty yields null
 * @param properties UDF configuration controlling field serialization
 * @param schema     Pig schema for the fields (may be null)
 * @return A valid JSON Vespa document operation, or null when inputs are missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    // The operation key carries the document id, e.g. {"put": "id:...", ...}.
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition is a template too; it may reference field values.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
@SuppressWarnings("unchecked")
// Writes one named document field. Fields configured under
// remove-bag-as-map-fields / add-bag-as-map-fields are expanded into
// per-entry "name{key}" fields; everything else is written as a single
// JSON field (partial-update-wrapped for top-level update operations).
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    if (shouldWriteField(name, properties, depth)) {
        if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties) ||
                isPartialOperation(ADD_BAG_AS_MAP_FIELDS, name, properties)) {
            // Bag schema: {(key, value)} — field 0 is the tuple, field 1 its value part.
            schema = (schema != null) ? schema.getField(0).schema : null;
            Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
            DataBag bag = (DataBag) value;
            for (Tuple element : bag) {
                // Skip bag elements that are not (key, value) pairs.
                if (element.size() != 2) {
                    continue;
                }
                String k = (String) element.get(0);
                Object v = element.get(1);
                Byte t = DataType.findType(v);
                // NOTE(review): non-tuple values are silently dropped here — confirm intended.
                if (t == DataType.TUPLE) {
                    g.writeFieldName(name + "{" + k + "}");
                    if (isPartialOperation(REMOVE_BAG_AS_MAP_FIELDS, name, properties)) {
                        // Remove syntax: {"remove": 0} per map entry.
                        g.writeStartObject();
                        g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                        g.writeNumber(0);
                        g.writeEndObject();
                    } else {
                        if (shouldWritePartialUpdate(op, depth)) {
                            writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
                        } else {
                            writeValue(v, t, g, name, properties, valueSchema, op, depth);
                        }
                    }
                }
            }
        } else {
            g.writeFieldName(name);
            if (shouldWritePartialUpdate(op, depth)) {
                writePartialUpdate(value, type, g, name, properties, schema, op, depth);
            } else {
                writeValue(value, type, g, name, properties, schema, op, depth);
            }
        }
    }
}
/**
 * Partial-update wrappers ("assign"/"add"/"remove") are emitted only for
 * update operations, and only for top-level document fields (depth 1).
 * FIX: dropped the stray @SuppressWarnings("unchecked") — this method
 * performs no unchecked operations, so the annotation only masked warnings.
 */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return op == Operation.UPDATE && depth == 1;
}
/**
 * Wraps a value in Vespa partial-update syntax: {"<verb>": <value>}. The verb
 * comes from the configured field lists in operationMap; fields not listed
 * anywhere default to "assign".
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    boolean matchedExplicitVerb = false;
    for (Map.Entry<String, String> mapping : operationMap.entrySet()) {
        String configured = properties.getProperty(mapping.getKey());
        if (configured != null && Arrays.asList(configured.split(",")).contains(name)) {
            g.writeFieldName(mapping.getValue());
            matchedExplicitVerb = true;
        }
    }
    if (!matchedExplicitVerb) {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/** True when {@code name} appears in the comma-separated property {@code label}. */
private static boolean isPartialOperation(String label, String name, Properties properties) {
    String configured = properties.getProperty(label);
    if (configured == null) {
        return false;
    }
    return Arrays.asList(configured.split(",")).contains(name);
}
/**
 * A tuple is normally written as a JSON array. Returns false (skip the array
 * wrapper) only for single-element tuples of fields listed in
 * simple-array-fields ("*" = all fields).
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    String[] fields = simpleArrayFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Whether a tuple field should serialize as a JSON object instead of an
 * array: true when {@code name} is listed (or "*") in either
 * add-bag-as-map-fields or simple-object-fields.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(ADD_BAG_AS_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    if (addBagAsMapFields != null) {
        if (addBagAsMapFields.equals("*")) {
            return true;
        }
        String[] fields = addBagAsMapFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (simpleObjectFields != null) {
        if (simpleObjectFields.equals("*")) {
            return true;
        }
        String[] fields = simpleObjectFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when bag field {@code name} is configured (via bag-as-map-fields,
 * "*" = all) to serialize as a JSON object instead of an array.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String field : configured.split(",")) {
        if (name.equalsIgnoreCase(field.trim())) {
            return true;
        }
    }
    return false;
}
/**
 * Whether the map field {@code name} should be serialized as a Vespa tensor,
 * i.e. whether it is listed in create-tensor-fields, add-tensor-fields or
 * remove-tensor-fields. The map contents are not inspected.
 * FIX: the membership loop was copy-pasted three times, once per property;
 * consolidated into a single loop over the three property names (behavior
 * unchanged — exact, case-insensitive, trimmed name match; no "*" support).
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorProperties = {CREATE_TENSOR_FIELDS, ADD_TENSOR_FIELDS, REMOVE_TENSOR_FIELDS};
    for (String property : tensorProperties) {
        String configured = properties.getProperty(property);
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/** True when {@code name} is listed in remove-tensor-fields (comma-separated, case-insensitive). */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String field : configured.split(",")) {
        if (name.equalsIgnoreCase(field.trim())) {
            return true;
        }
    }
    return false;
}
/** Fields listed in exclude-fields are dropped — at the top level (depth 1) only. */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String field : excluded.split(",")) {
        if (name.equalsIgnoreCase(field.trim())) {
            return false;
        }
    }
    return true;
}
/**
 * Writes a map as a Vespa tensor "cells" array. Map keys encode cell
 * addresses as "dim1:label1,dim2:label2,..."; map values must parse as doubles.
 *
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 * @throws NumberFormatException if a cell value is not numeric
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Empty segments (e.g. from trailing commas) are skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Writes the "addresses" array of a tensor "remove" update: one address
 * object per "dim:label" pair in each map key ("dim1:label1,dim2:label2,...").
 * Map values are ignored for removal.
 * BUG FIX: the empty-dimension check previously ran AFTER writeStartObject(),
 * so a skipped segment left an unclosed JSON object and corrupted the output;
 * the check now happens before any object is opened.
 *
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/** Vespa document operation types supported by this UDF. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    // Lower-case form matches the field names used in Vespa document JSON.
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /** Parses a case-insensitive name; throws IllegalArgumentException when unknown. */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** True when {@code text} (case-insensitive) names a known operation. */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property names accepted by the UDF constructor (passed as "name=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Field-list properties: comma-separated field names (or "*") selecting an
// alternate JSON serialization for the named fields.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// JSON keys of Vespa's partial-update syntax.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Map-typed fields: field-list property -> partial-update verb for per-entry updates.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// All fields: field-list property -> partial-update verb for whole-field updates.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Document id template (resolved per row against the tuple's fields) and the
// operation parsed from the UDF arguments.
private final String template;
private final Operation operation;
private final Properties properties;
// Pig counter reporter; may be null outside a real Pig job (e.g. unit tests).
private PigStatusReporter statusReporter;

/**
 * @param params "name=value" property strings; "docid" (required) is the id
 *               template and "operation" (default "put") selects the operation.
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        // Register both counters up front so they appear even for all-ok runs.
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
/**
 * Converts one Pig tuple into a JSON Vespa document operation.
 * Returns null when the row cannot be converted; failures are counted and
 * warned about but never propagated, so a bad row does not kill the job.
 */
@Override
public String exec(Tuple tuple) throws IOException {
    // Empty rows bump the failed counter but produce no warning.
    if (tuple == null || tuple.size() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        return null;
    }
    if (template == null || template.length() == 0) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
        return null;
    }
    if (operation == null) {
        // NOTE(review): unlike the other failure paths this one does not bump
        // the failed counter — confirm whether that is intentional.
        warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
        return null;
    }
    String json = null;
    try {
        if (reporter != null) {
            reporter.progress();  // keep long-running tasks from being timed out
        }
        Schema inputSchema = getInputSchema();
        Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
        String docId = TupleTools.toString(fields, template);
        json = create(operation, docId, fields, properties, inputSchema);
        if (json == null || json.length() == 0) {
            warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
            return null;
        }
    } catch (Exception e) {
        if (statusReporter != null) {
            statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
        }
        // Report the offending row plus full stack trace, then drop the row.
        StringBuilder sb = new StringBuilder();
        sb.append("Caught exception processing input row: \n");
        sb.append(tuple.toString());
        sb.append("\nException: ");
        sb.append(getStackTraceAsString(e));
        warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
        return null;
    }
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
    }
    return json;
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update); null yields null
 * @param docId      Document id; null/empty yields null
 * @param fields     Fields to put in document operation; empty yields null
 * @param properties UDF configuration controlling field serialization
 * @param schema     Pig schema for the fields (may be null)
 * @return A valid JSON Vespa document operation, or null when inputs are missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    // The operation key carries the document id, e.g. {"put": "id:...", ...}.
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition is a template too; it may reference field values.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    return out.toString();
}
/**
 * Returns the partial-update verb ("assign"/"add"/"remove") mapped to the
 * first property in {@code operationMap} whose comma-separated field list
 * contains {@code name}, or null when no configured property lists the field.
 * FIX: iterate entrySet and read each property once instead of the
 * keySet + double getProperty lookup.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> mapping : operationMap.entrySet()) {
        String configured = properties.getProperty(mapping.getKey());
        if (configured != null && Arrays.asList(configured.split(",")).contains(name)) {
            return mapping.getValue();
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
// Writes one named document field, honoring exclude-fields. Fields configured
// in remove-map-fields / update-map-fields are expanded into per-entry
// "name{key}" partial updates; everything else is written as a single JSON
// field (partial-update-wrapped for top-level update operations).
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    if (shouldWriteField(name, properties, depth)) {
        String operation = getPartialOperation(mapPartialOperationMap, name, properties);
        if (operation != null) {
            writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
        } else {
            g.writeFieldName(name);
            if (shouldWritePartialUpdate(op, depth)) {
                writePartialUpdate(value, type, g, name, properties, schema, op, depth);
            } else {
                writeValue(value, type, g, name, properties, schema, op, depth);
            }
        }
    }
}
/**
 * Writes partial updates for individual entries of a map-typed field using
 * "name{key}" field syntax. For the remove verb each key gets {"remove": 0};
 * otherwise the entry value is written as a partial update. Bag elements that
 * are not (key, value) pairs, or whose value is not a tuple, are skipped.
 */
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    // Bag schema: {(key, value)} — field 0 is the tuple, field 1 its value part.
    schema = (schema != null) ? schema.getField(0).schema : null;
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        // NOTE(review): non-tuple values are silently dropped here — confirm intended.
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
/**
 * Partial-update wrappers ("assign"/"add"/"remove") are emitted only for
 * update operations, and only for top-level document fields (depth 1).
 * FIX: dropped the stray @SuppressWarnings("unchecked") — this method
 * performs no unchecked operations, so the annotation only masked warnings.
 */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return op == Operation.UPDATE && depth == 1;
}
/**
 * Wraps a value in Vespa partial-update syntax: {"<verb>": <value>}. The verb
 * is looked up from the configured field lists; unlisted fields use "assign".
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    g.writeFieldName(operation != null ? operation : PARTIAL_UPDATE_ASSIGN);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * A tuple is normally written as a JSON array. Returns false (skip the array
 * wrapper) only for single-element tuples of fields listed in
 * simple-array-fields ("*" = all fields).
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if (configured.equals("*")) {
        return false;
    }
    for (String field : configured.split(",")) {
        if (name.equalsIgnoreCase(field.trim())) {
            return false;
        }
    }
    return true;
}
/**
 * Whether a tuple field should serialize as a JSON object instead of an
 * array: true when {@code name} is listed (or "*") in either
 * update-map-fields or simple-object-fields.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    if (addBagAsMapFields != null) {
        if (addBagAsMapFields.equals("*")) {
            return true;
        }
        String[] fields = addBagAsMapFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    if (simpleObjectFields != null) {
        if (simpleObjectFields.equals("*")) {
            return true;
        }
        String[] fields = simpleObjectFields.split(",");
        for (String field : fields) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * True when bag field {@code name} is configured (via bag-as-map-fields,
 * "*" = all) to serialize as a JSON object instead of an array.
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (bagAsMapFields == null) {
        return false;
    }
    if (bagAsMapFields.equals("*")) {
        return true;
    }
    String[] fields = bagAsMapFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * Whether the map field {@code name} should be serialized as a Vespa tensor,
 * i.e. whether it is listed in create-tensor-fields, update-tensor-fields or
 * remove-tensor-fields. The map contents are not inspected.
 * FIX: the membership loop was copy-pasted three times, once per property;
 * consolidated into a single loop over the three property names (behavior
 * unchanged — exact, case-insensitive, trimmed name match; no "*" support).
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorProperties = {CREATE_TENSOR_FIELDS, UPDATE_TENSOR_FIELDS, REMOVE_TENSOR_FIELDS};
    for (String property : tensorProperties) {
        String configured = properties.getProperty(property);
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/** True when {@code name} is listed in remove-tensor-fields (comma-separated, case-insensitive). */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    String[] fields = removeTensorFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/** Fields listed in exclude-fields are dropped — at the top level (depth 1) only. */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    String[] fields = excludeFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Writes a map as a Vespa tensor "cells" array. Map keys encode cell
 * addresses as "dim1:label1,dim2:label2,..."; map values must parse as doubles.
 *
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 * @throws NumberFormatException if a cell value is not numeric
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Empty segments (e.g. from trailing commas) are skipped.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Writes the "addresses" array of a tensor "remove" update: one address
 * object per "dim:label" pair in each map key ("dim1:label1,dim2:label2,...").
 * Map values are ignored for removal.
 * BUG FIX: the empty-dimension check previously ran AFTER writeStartObject(),
 * so a skipped segment left an unclosed JSON object and corrupted the output;
 * the check now happens before any object is opened.
 *
 * @throws IllegalArgumentException on a malformed "dim:label" pair
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/** Renders a throwable's full stack trace to a string (for warn messages). */
private static String getStackTraceAsString(Throwable throwable) {
    // FIX: StringWriter.close() is a documented no-op and never throws, so the
    // previous try-with-resources plus IOException -> UncheckedIOException
    // wrapper was dead code; a plain writer pair cannot fail.
    StringWriter stringWriter = new StringWriter();
    throwable.printStackTrace(new PrintWriter(stringWriter, true));
    return stringWriter.toString();
}
/** Emits a Pig UDF warning and echoes the message to stderr for the task log. */
private void warnLog(String msg, PigWarning warning) {
    warn(msg, warning);
    System.err.println(msg);
}
} |
coding style | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name ,value, g, properties, schema, op, depth, operation);
}else{
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | }else{ | private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/** Vespa document operation types supported by this UDF. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    // Lower-case form matches the field names used in Vespa document JSON.
    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    /** Parses a case-insensitive name; throws IllegalArgumentException when unknown. */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** True when {@code text} (case-insensitive) names a known operation. */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property names accepted by the UDF constructor (passed as "name=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
// Field-list properties: comma-separated field names (or "*") selecting an
// alternate JSON serialization for the named fields.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// JSON keys of Vespa's partial-update syntax.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Map-typed fields: field-list property -> partial-update verb for per-entry updates.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// All fields: field-list property -> partial-update verb for whole-field updates.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Document id template, operation, and configuration parsed from UDF arguments.
private final String template;
private final Operation operation;
private final Properties properties;
// Pig counter reporter; may be null outside a real Pig job (e.g. unit tests).
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if(statusReporter != null){
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",0);
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warn("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
if(statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warn("No valid operation found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warn("No valid document operation could be created.", PigWarning.UDF_WARNING_1);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation failed",1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(ExceptionUtils.getStackTrace(e));
warn(sb.toString(), PigWarning.UDF_WARNING_1);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters","Document operation ok",1);
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label: operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
}else{
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if(isRemoveTensor(name,properties)){
writeRemoveTensor(map, g);
}else{
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth+1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth+1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth+1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
}else{
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean isPartialOperation(String label, String name, Properties properties) {
boolean isPartialOperation = false;
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
isPartialOperation = true;
}
}
return isPartialOperation;
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null){
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if(simpleObjectFields != null){
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties){
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
}
@Override
public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException ...
*/
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label : operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("addresses");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
g.writeStartObject();
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
g.writeEndObject();
}
}
g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
Do we have any numbers that indicate disabling `TCP_NODELAY` is worth it? We unconditionally enabled nodelay for everything to get rid of spurious latency spikes incurred by the in-kernel deferred packet sending. | public Connection(TransportThread parent, Supervisor owner, Spec spec, Object context, boolean tcpNoDelay) {
super(context);
this.parent = parent;
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
server = false;
owner.sessionInit(this);
} | this.tcpNoDelay = tcpNoDelay; | public Connection(TransportThread parent, Supervisor owner, Spec spec, Object context, boolean tcpNoDelay) {
super(context);
this.parent = parent;
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
server = false;
owner.sessionInit(this);
} | class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
private static final int READ_SIZE = 32768;
private static final int READ_REDO = 10;
private static final int WRITE_SIZE = 32768;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
private static final int CONNECTING = 1;
private static final int CONNECTED = 2;
private static final int CLOSED = 3;
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
private final Buffer input = new Buffer(READ_SIZE * 2);
private final Buffer output = new Buffer(WRITE_SIZE * 2);
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
private int activeReqs = 0;
private int writeWork = 0;
private boolean pendingHandshakeWork = false;
private final TransportThread parent;
private final Supervisor owner;
private final Spec spec;
private CryptoSocket socket;
private int readSize = READ_SIZE;
private final boolean server;
private final AtomicLong requestId = new AtomicLong(0);
private SelectionKey selectionKey;
private Exception lostReason = null;
// Advances the connection life-cycle state (INITIAL -> CONNECTING -> CONNECTED
// -> CLOSED). Transitions must be strictly increasing; anything else is logged
// as bogus and ignored. The new state is published under the lock, but all
// callbacks below run without holding it.
private void setState(int state) {
    if (state <= this.state) {
        log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state);
        return;
    }
    boolean live = (state == CONNECTED);
    boolean down = (state == CLOSED);
    boolean fini;
    boolean pendingWrite;
    synchronized (this) {
        // Sample activeReqs/writeWork together with the state change so the
        // decisions below are consistent with the moment of transition.
        this.state = state;
        fini = down && (activeReqs == 0);
        pendingWrite = (writeWork > 0);
    }
    if (live) {
        // Just became CONNECTED: always read; only keep write interest if
        // packets were queued while the handshake was still in progress.
        enableRead();
        if (pendingWrite) {
            enableWrite();
        } else {
            disableWrite();
        }
        owner.sessionLive(this);
    }
    if (down) {
        // Connection closed: fail every outstanding reply and invalidate all
        // watchers before telling the owner the session is down.
        // NOTE(review): replyMap and watchers are iterated here without the
        // lock that guards them in cancelReply()/addWatcher() — presumably
        // safe because this runs on the transport thread after close; confirm.
        for (ReplyHandler rh : replyMap.values()) {
            rh.handleConnectionDown();
        }
        for (TargetWatcher watcher : watchers.values()) {
            watcher.notifyTargetInvalid(this);
        }
        owner.sessionDown(this);
    }
    if (fini) {
        // Nothing in flight, so finalize immediately; otherwise
        // completeRequest() signals fini when the last active request ends.
        owner.sessionFini(this);
    }
}
// Server-side constructor: wraps a channel accepted by the transport in a
// server crypto socket. Inbound connections have no connect target, so spec
// stays null.
public Connection(TransportThread parent, Supervisor owner,
                  SocketChannel channel, boolean tcpNoDelay) {
    this.parent = parent;
    this.owner = owner;
    this.socket = parent.transport().createServerCryptoSocket(channel);
    this.spec = null;  // no target spec for an accepted connection
    this.tcpNoDelay = tcpNoDelay;  // applied to the socket in init()
    server = true;  // gives server-side request keys odd parity in allocateKey()
    owner.sessionInit(this);  // publishes 'this' to the owner; kept as the last step
}
public void setMaxInputSize(int bytes) {
maxInputSize = bytes;
}
public void setMaxOutputSize(int bytes) {
maxOutputSize = bytes;
}
public TransportThread transportThread() {
return parent;
}
public int allocateKey() {
long v = requestId.getAndIncrement();
v = v*2 + (server ? 1 : 0);
int i = (int)(v & 0x7fffffff);
return i;
}
/**
 * Withdraws a pending-reply registration.
 *
 * Succeeds only if this exact handler is the one registered under its key;
 * if a different handler happens to occupy the key it is put back untouched.
 *
 * @param handler the handler to deregister
 * @return true if the handler was registered and has now been removed
 */
public synchronized boolean cancelReply(ReplyHandler handler) {
    if (state == CLOSED) {
        return false;
    }
    ReplyHandler removed = replyMap.remove(handler.key());
    if (removed == handler) {
        return true;
    }
    if (removed != null) {
        // Someone else's registration — restore it.
        replyMap.put(handler.key(), removed);
    }
    return false;
}
/**
 * Queues a packet for sending on this connection.
 *
 * The packet is accepted unless the connection is already CLOSED. If a reply
 * handler is supplied it is registered under its key before the method
 * returns. The transport thread is only woken when this packet is the first
 * piece of pending write work on an already-connected connection.
 *
 * @param packet  the packet to enqueue
 * @param handler reply handler to register, or null for fire-and-forget
 * @return true if the packet was accepted for sending
 */
public boolean postPacket(Packet packet, ReplyHandler handler) {
    boolean accepted;
    boolean kickWriter = false;
    synchronized (this) {
        accepted = (state <= CONNECTED);
        if (accepted) {
            kickWriter = (writeWork == 0 && state == CONNECTED);
            queue.enqueue(packet);
            writeWork++;
            if (handler != null) {
                replyMap.put(handler.key(), handler);
            }
        }
    }
    if (kickWriter) {
        // Wake the transport thread outside the lock.
        parent.enableWrite(this);
    }
    return accepted;
}
public boolean postPacket(Packet packet) {
return postPacket(packet, null);
}
public Connection connect() {
if (spec == null || spec.malformed()) {
setLostReason(new IllegalArgumentException("jrt: malformed or missing spec"));
return this;
}
try {
socket = parent.transport().createClientCryptoSocket(SocketChannel.open(spec.resolveAddress()), spec);
} catch (Exception e) {
setLostReason(e);
}
return this;
}
public boolean init(Selector selector) {
if (!hasSocket()) {
return false;
}
try {
socket.channel().configureBlocking(false);
socket.channel().socket().setTcpNoDelay(tcpNoDelay);
selectionKey = socket.channel().register(selector,
SelectionKey.OP_READ | SelectionKey.OP_WRITE,
this);
} catch (Exception e) {
log.log(Level.WARNING, "Error initializing connection", e);
setLostReason(e);
return false;
}
setState(CONNECTING);
return true;
}
public void enableRead() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_READ);
}
public void disableRead() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_READ);
}
public void enableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_WRITE);
}
public void disableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_WRITE);
}
private void handshake() throws IOException {
if (pendingHandshakeWork) {
return;
}
switch (socket.handshake()) {
case DONE:
if (socket.getMinimumReadBufferSize() > readSize) {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
break;
case NEED_READ:
enableRead();
disableWrite();
break;
case NEED_WRITE:
disableRead();
enableWrite();
break;
case NEED_WORK:
disableRead();
disableWrite();
pendingHandshakeWork = true;
parent.transport().doHandshakeWork(this);
break;
}
}
public void doHandshakeWork() {
socket.doHandshakeWork();
}
public void handleHandshakeWorkDone() throws IOException {
if (!pendingHandshakeWork) {
throw new IllegalStateException("jrt: got unwanted handshake work done event");
}
pendingHandshakeWork = false;
if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got handshake work done event in incompatible state: " + state);
}
}
private void handlePackets() throws IOException {
ByteBuffer rb = input.getReadable();
while (true) {
PacketInfo info = PacketInfo.getPacketInfo(rb);
if (info == null || info.packetLength() > rb.remaining()) {
break;
}
owner.readPacket(info);
Packet packet;
try {
packet = info.decodePacket(rb);
} catch (RuntimeException e) {
log.log(Level.WARNING, "got garbage; closing connection: " + toString());
throw new IOException("jrt: decode error", e);
}
ReplyHandler handler;
synchronized (this) {
handler = replyMap.remove(packet.requestId());
}
if (handler != null) {
handler.handleReply(packet);
} else {
owner.handlePacket(this, packet);
}
}
}
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
ByteBuffer wb = input.getChannelWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
if (maxInputSize > 0) {
input.shrink(maxInputSize);
}
}
public void handleReadEvent() throws IOException {
if (state == CONNECTED) {
read();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got read event in incompatible state: " + state);
}
}
private void write() throws IOException {
synchronized (this) {
queue.flush(myQueue);
}
for (int i = 0; i < WRITE_REDO; i++) {
while (output.bytes() < WRITE_SIZE) {
Packet packet = (Packet) myQueue.dequeue();
if (packet == null) {
break;
}
PacketInfo info = packet.getPacketInfo();
ByteBuffer wb = output.getWritable(info.packetLength());
owner.writePacket(info);
info.encodePacket(packet, wb);
}
ByteBuffer rb = output.getChannelReadable();
if (rb.remaining() == 0) {
break;
}
socket.write(rb);
if (rb.remaining() > 0) {
break;
}
}
int myWriteWork = 0;
if (output.bytes() > 0) {
myWriteWork++;
}
if (socket.flush() == CryptoSocket.FlushResult.NEED_WRITE) {
myWriteWork++;
}
boolean disableWrite;
synchronized (this) {
writeWork = queue.size()
+ myQueue.size()
+ myWriteWork;
disableWrite = (writeWork == 0);
}
if (disableWrite) {
disableWrite();
}
if (maxOutputSize > 0) {
output.shrink(maxOutputSize);
}
}
public void handleWriteEvent() throws IOException {
if (state == CONNECTED) {
write();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got write event in incompatible state: " + state);
}
}
public void fini() {
setState(CLOSED);
if (selectionKey != null) {
selectionKey.cancel();
}
}
public boolean isClosed() {
return (state == CLOSED);
}
public boolean hasSocket() {
return ((socket != null) && (socket.channel() != null));
}
public void closeSocket() {
if (hasSocket()) {
try {
socket.channel().socket().close();
} catch (Exception e) {
log.log(Level.WARNING, "Error closing connection", e);
}
}
}
public void setLostReason(Exception e) {
if (lostReason == null) {
lostReason = e;
}
}
public TieBreaker startRequest() {
synchronized (this) {
activeReqs++;
}
return new TieBreaker();
}
public boolean completeRequest(TieBreaker done) {
boolean signalFini = false;
synchronized (this) {
if (!done.first()) {
return false;
}
if (--activeReqs == 0 && state == CLOSED) {
signalFini = true;
}
}
if (signalFini) {
owner.sessionFini(this);
}
return true;
}
public boolean isValid() {
return (state != CLOSED);
}
public Exception getConnectionLostReason() {
return lostReason;
}
@Override
public Optional<SecurityContext> getSecurityContext() {
return Optional.ofNullable(socket)
.flatMap(CryptoSocket::getSecurityContext);
}
public boolean isClient() {
return !server;
}
public boolean isServer() {
return server;
}
/**
 * Invokes a request on this connection and blocks until it completes or
 * the timeout expires.
 *
 * @param req     the request to invoke
 * @param timeout timeout in seconds; negative values are treated as zero
 */
public void invokeSync(Request req, double timeout) {
    SingleRequestWaiter blocker = new SingleRequestWaiter();
    invokeAsync(req, timeout, blocker);
    blocker.waitDone();
}
/**
 * Invokes a request on this connection without blocking; the waiter is
 * notified when the request completes.
 *
 * @param req     the request to invoke
 * @param timeout timeout in seconds; negative values are clamped to zero
 * @param waiter  callback notified on completion
 */
public void invokeAsync(Request req, double timeout,
                        RequestWaiter waiter) {
    // Ternary mirrors the original '< 0.0' clamp exactly (Math.max would
    // additionally normalize -0.0 to 0.0).
    double effectiveTimeout = (timeout < 0.0) ? 0.0 : timeout;
    new InvocationClient(this, req, effectiveTimeout, waiter).invoke();
}
public boolean invokeVoid(Request req) {
return postPacket(new RequestPacket(Packet.FLAG_NOREPLY,
allocateKey(),
req.methodName(),
req.parameters()));
}
public synchronized boolean addWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.put(watcher, watcher);
return true;
}
public synchronized boolean removeWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.remove(watcher);
return true;
}
public void close() {
parent.closeConnection(this);
}
/** Human-readable description: the underlying socket if present, otherwise the connect spec. */
public String toString() {
    return hasSocket()
            ? "Connection { " + socket.channel().socket() + " }"
            : "Connection { no socket, spec " + spec + " }";
}
} | class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
private static final int READ_SIZE = 32768;
private static final int READ_REDO = 10;
private static final int WRITE_SIZE = 32768;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
private static final int CONNECTING = 1;
private static final int CONNECTED = 2;
private static final int CLOSED = 3;
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
private final Buffer input = new Buffer(READ_SIZE * 2);
private final Buffer output = new Buffer(WRITE_SIZE * 2);
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
private int activeReqs = 0;
private int writeWork = 0;
private boolean pendingHandshakeWork = false;
private final TransportThread parent;
private final Supervisor owner;
private final Spec spec;
private CryptoSocket socket;
private int readSize = READ_SIZE;
private final boolean server;
private final AtomicLong requestId = new AtomicLong(0);
private SelectionKey selectionKey;
private Exception lostReason = null;
private void setState(int state) {
if (state <= this.state) {
log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state);
return;
}
boolean live = (state == CONNECTED);
boolean down = (state == CLOSED);
boolean fini;
boolean pendingWrite;
synchronized (this) {
this.state = state;
fini = down && (activeReqs == 0);
pendingWrite = (writeWork > 0);
}
if (live) {
enableRead();
if (pendingWrite) {
enableWrite();
} else {
disableWrite();
}
owner.sessionLive(this);
}
if (down) {
for (ReplyHandler rh : replyMap.values()) {
rh.handleConnectionDown();
}
for (TargetWatcher watcher : watchers.values()) {
watcher.notifyTargetInvalid(this);
}
owner.sessionDown(this);
}
if (fini) {
owner.sessionFini(this);
}
}
public Connection(TransportThread parent, Supervisor owner,
SocketChannel channel, boolean tcpNoDelay) {
this.parent = parent;
this.owner = owner;
this.socket = parent.transport().createServerCryptoSocket(channel);
this.spec = null;
this.tcpNoDelay = tcpNoDelay;
server = true;
owner.sessionInit(this);
}
public void setMaxInputSize(int bytes) {
maxInputSize = bytes;
}
public void setMaxOutputSize(int bytes) {
maxOutputSize = bytes;
}
public TransportThread transportThread() {
return parent;
}
public int allocateKey() {
long v = requestId.getAndIncrement();
v = v*2 + (server ? 1 : 0);
int i = (int)(v & 0x7fffffff);
return i;
}
public synchronized boolean cancelReply(ReplyHandler handler) {
if (state == CLOSED) {
return false;
}
ReplyHandler stored = replyMap.remove(handler.key());
if (stored != handler) {
if (stored != null) {
replyMap.put(handler.key(), stored);
}
return false;
}
return true;
}
public boolean postPacket(Packet packet, ReplyHandler handler) {
boolean accepted = false;
boolean enableWrite = false;
synchronized (this) {
if (state <= CONNECTED) {
enableWrite = (writeWork == 0 && state == CONNECTED);
queue.enqueue(packet);
writeWork++;
accepted = true;
if (handler != null) {
replyMap.put(handler.key(), handler);
}
}
}
if (enableWrite) {
parent.enableWrite(this);
}
return accepted;
}
public boolean postPacket(Packet packet) {
return postPacket(packet, null);
}
public Connection connect() {
if (spec == null || spec.malformed()) {
setLostReason(new IllegalArgumentException("jrt: malformed or missing spec"));
return this;
}
try {
socket = parent.transport().createClientCryptoSocket(SocketChannel.open(spec.resolveAddress()), spec);
} catch (Exception e) {
setLostReason(e);
}
return this;
}
public boolean init(Selector selector) {
if (!hasSocket()) {
return false;
}
try {
socket.channel().configureBlocking(false);
socket.channel().socket().setTcpNoDelay(tcpNoDelay);
selectionKey = socket.channel().register(selector,
SelectionKey.OP_READ | SelectionKey.OP_WRITE,
this);
} catch (Exception e) {
log.log(Level.WARNING, "Error initializing connection", e);
setLostReason(e);
return false;
}
setState(CONNECTING);
return true;
}
public void enableRead() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_READ);
}
public void disableRead() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_READ);
}
public void enableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_WRITE);
}
public void disableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_WRITE);
}
private void handshake() throws IOException {
if (pendingHandshakeWork) {
return;
}
switch (socket.handshake()) {
case DONE:
if (socket.getMinimumReadBufferSize() > readSize) {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
break;
case NEED_READ:
enableRead();
disableWrite();
break;
case NEED_WRITE:
disableRead();
enableWrite();
break;
case NEED_WORK:
disableRead();
disableWrite();
pendingHandshakeWork = true;
parent.transport().doHandshakeWork(this);
break;
}
}
public void doHandshakeWork() {
socket.doHandshakeWork();
}
public void handleHandshakeWorkDone() throws IOException {
if (!pendingHandshakeWork) {
throw new IllegalStateException("jrt: got unwanted handshake work done event");
}
pendingHandshakeWork = false;
if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got handshake work done event in incompatible state: " + state);
}
}
private void handlePackets() throws IOException {
ByteBuffer rb = input.getReadable();
while (true) {
PacketInfo info = PacketInfo.getPacketInfo(rb);
if (info == null || info.packetLength() > rb.remaining()) {
break;
}
owner.readPacket(info);
Packet packet;
try {
packet = info.decodePacket(rb);
} catch (RuntimeException e) {
log.log(Level.WARNING, "got garbage; closing connection: " + toString());
throw new IOException("jrt: decode error", e);
}
ReplyHandler handler;
synchronized (this) {
handler = replyMap.remove(packet.requestId());
}
if (handler != null) {
handler.handleReply(packet);
} else {
owner.handlePacket(this, packet);
}
}
}
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
ByteBuffer wb = input.getChannelWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
if (maxInputSize > 0) {
input.shrink(maxInputSize);
}
}
public void handleReadEvent() throws IOException {
if (state == CONNECTED) {
read();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got read event in incompatible state: " + state);
}
}
private void write() throws IOException {
synchronized (this) {
queue.flush(myQueue);
}
for (int i = 0; i < WRITE_REDO; i++) {
while (output.bytes() < WRITE_SIZE) {
Packet packet = (Packet) myQueue.dequeue();
if (packet == null) {
break;
}
PacketInfo info = packet.getPacketInfo();
ByteBuffer wb = output.getWritable(info.packetLength());
owner.writePacket(info);
info.encodePacket(packet, wb);
}
ByteBuffer rb = output.getChannelReadable();
if (rb.remaining() == 0) {
break;
}
socket.write(rb);
if (rb.remaining() > 0) {
break;
}
}
int myWriteWork = 0;
if (output.bytes() > 0) {
myWriteWork++;
}
if (socket.flush() == CryptoSocket.FlushResult.NEED_WRITE) {
myWriteWork++;
}
boolean disableWrite;
synchronized (this) {
writeWork = queue.size()
+ myQueue.size()
+ myWriteWork;
disableWrite = (writeWork == 0);
}
if (disableWrite) {
disableWrite();
}
if (maxOutputSize > 0) {
output.shrink(maxOutputSize);
}
}
public void handleWriteEvent() throws IOException {
if (state == CONNECTED) {
write();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got write event in incompatible state: " + state);
}
}
public void fini() {
setState(CLOSED);
if (selectionKey != null) {
selectionKey.cancel();
}
}
public boolean isClosed() {
return (state == CLOSED);
}
public boolean hasSocket() {
return ((socket != null) && (socket.channel() != null));
}
public void closeSocket() {
if (hasSocket()) {
try {
socket.channel().socket().close();
} catch (Exception e) {
log.log(Level.WARNING, "Error closing connection", e);
}
}
}
public void setLostReason(Exception e) {
if (lostReason == null) {
lostReason = e;
}
}
public TieBreaker startRequest() {
synchronized (this) {
activeReqs++;
}
return new TieBreaker();
}
public boolean completeRequest(TieBreaker done) {
boolean signalFini = false;
synchronized (this) {
if (!done.first()) {
return false;
}
if (--activeReqs == 0 && state == CLOSED) {
signalFini = true;
}
}
if (signalFini) {
owner.sessionFini(this);
}
return true;
}
public boolean isValid() {
return (state != CLOSED);
}
public Exception getConnectionLostReason() {
return lostReason;
}
@Override
public Optional<SecurityContext> getSecurityContext() {
return Optional.ofNullable(socket)
.flatMap(CryptoSocket::getSecurityContext);
}
public boolean isClient() {
return !server;
}
public boolean isServer() {
return server;
}
public void invokeSync(Request req, double timeout) {
SingleRequestWaiter waiter = new SingleRequestWaiter();
invokeAsync(req, timeout, waiter);
waiter.waitDone();
}
public void invokeAsync(Request req, double timeout,
RequestWaiter waiter) {
if (timeout < 0.0) {
timeout = 0.0;
}
new InvocationClient(this, req, timeout, waiter).invoke();
}
public boolean invokeVoid(Request req) {
return postPacket(new RequestPacket(Packet.FLAG_NOREPLY,
allocateKey(),
req.methodName(),
req.parameters()));
}
public synchronized boolean addWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.put(watcher, watcher);
return true;
}
public synchronized boolean removeWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.remove(watcher);
return true;
}
public void close() {
parent.closeConnection(this);
}
public String toString() {
if (hasSocket()) {
return "Connection { " + socket.channel().socket() + " }";
}
return "Connection { no socket, spec " + spec + " }";
}
} |
I saw significantly lower system CPU when hardcoding it yesterday. However, I ran a lot of different tests that day, so I am claiming no more than that this is a dimension that should be tested further. And for that to happen I need config control over it. | public Connection(TransportThread parent, Supervisor owner, Spec spec, Object context, boolean tcpNoDelay) {
super(context);
this.parent = parent;
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
server = false;
owner.sessionInit(this);
} | this.tcpNoDelay = tcpNoDelay; | public Connection(TransportThread parent, Supervisor owner, Spec spec, Object context, boolean tcpNoDelay) {
super(context);
this.parent = parent;
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
server = false;
owner.sessionInit(this);
} | class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
private static final int READ_SIZE = 32768;
private static final int READ_REDO = 10;
private static final int WRITE_SIZE = 32768;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
private static final int CONNECTING = 1;
private static final int CONNECTED = 2;
private static final int CLOSED = 3;
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
private final Buffer input = new Buffer(READ_SIZE * 2);
private final Buffer output = new Buffer(WRITE_SIZE * 2);
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
private int activeReqs = 0;
private int writeWork = 0;
private boolean pendingHandshakeWork = false;
private final TransportThread parent;
private final Supervisor owner;
private final Spec spec;
private CryptoSocket socket;
private int readSize = READ_SIZE;
private final boolean server;
private final AtomicLong requestId = new AtomicLong(0);
private SelectionKey selectionKey;
private Exception lostReason = null;
private void setState(int state) {
if (state <= this.state) {
log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state);
return;
}
boolean live = (state == CONNECTED);
boolean down = (state == CLOSED);
boolean fini;
boolean pendingWrite;
synchronized (this) {
this.state = state;
fini = down && (activeReqs == 0);
pendingWrite = (writeWork > 0);
}
if (live) {
enableRead();
if (pendingWrite) {
enableWrite();
} else {
disableWrite();
}
owner.sessionLive(this);
}
if (down) {
for (ReplyHandler rh : replyMap.values()) {
rh.handleConnectionDown();
}
for (TargetWatcher watcher : watchers.values()) {
watcher.notifyTargetInvalid(this);
}
owner.sessionDown(this);
}
if (fini) {
owner.sessionFini(this);
}
}
public Connection(TransportThread parent, Supervisor owner,
SocketChannel channel, boolean tcpNoDelay) {
this.parent = parent;
this.owner = owner;
this.socket = parent.transport().createServerCryptoSocket(channel);
this.spec = null;
this.tcpNoDelay = tcpNoDelay;
server = true;
owner.sessionInit(this);
}
public void setMaxInputSize(int bytes) {
maxInputSize = bytes;
}
public void setMaxOutputSize(int bytes) {
maxOutputSize = bytes;
}
public TransportThread transportThread() {
return parent;
}
public int allocateKey() {
long v = requestId.getAndIncrement();
v = v*2 + (server ? 1 : 0);
int i = (int)(v & 0x7fffffff);
return i;
}
public synchronized boolean cancelReply(ReplyHandler handler) {
if (state == CLOSED) {
return false;
}
ReplyHandler stored = replyMap.remove(handler.key());
if (stored != handler) {
if (stored != null) {
replyMap.put(handler.key(), stored);
}
return false;
}
return true;
}
public boolean postPacket(Packet packet, ReplyHandler handler) {
boolean accepted = false;
boolean enableWrite = false;
synchronized (this) {
if (state <= CONNECTED) {
enableWrite = (writeWork == 0 && state == CONNECTED);
queue.enqueue(packet);
writeWork++;
accepted = true;
if (handler != null) {
replyMap.put(handler.key(), handler);
}
}
}
if (enableWrite) {
parent.enableWrite(this);
}
return accepted;
}
public boolean postPacket(Packet packet) {
return postPacket(packet, null);
}
public Connection connect() {
if (spec == null || spec.malformed()) {
setLostReason(new IllegalArgumentException("jrt: malformed or missing spec"));
return this;
}
try {
socket = parent.transport().createClientCryptoSocket(SocketChannel.open(spec.resolveAddress()), spec);
} catch (Exception e) {
setLostReason(e);
}
return this;
}
public boolean init(Selector selector) {
if (!hasSocket()) {
return false;
}
try {
socket.channel().configureBlocking(false);
socket.channel().socket().setTcpNoDelay(tcpNoDelay);
selectionKey = socket.channel().register(selector,
SelectionKey.OP_READ | SelectionKey.OP_WRITE,
this);
} catch (Exception e) {
log.log(Level.WARNING, "Error initializing connection", e);
setLostReason(e);
return false;
}
setState(CONNECTING);
return true;
}
public void enableRead() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_READ);
}
public void disableRead() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_READ);
}
public void enableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_WRITE);
}
public void disableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_WRITE);
}
private void handshake() throws IOException {
if (pendingHandshakeWork) {
return;
}
switch (socket.handshake()) {
case DONE:
if (socket.getMinimumReadBufferSize() > readSize) {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
break;
case NEED_READ:
enableRead();
disableWrite();
break;
case NEED_WRITE:
disableRead();
enableWrite();
break;
case NEED_WORK:
disableRead();
disableWrite();
pendingHandshakeWork = true;
parent.transport().doHandshakeWork(this);
break;
}
}
public void doHandshakeWork() {
socket.doHandshakeWork();
}
public void handleHandshakeWorkDone() throws IOException {
if (!pendingHandshakeWork) {
throw new IllegalStateException("jrt: got unwanted handshake work done event");
}
pendingHandshakeWork = false;
if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got handshake work done event in incompatible state: " + state);
}
}
private void handlePackets() throws IOException {
ByteBuffer rb = input.getReadable();
while (true) {
PacketInfo info = PacketInfo.getPacketInfo(rb);
if (info == null || info.packetLength() > rb.remaining()) {
break;
}
owner.readPacket(info);
Packet packet;
try {
packet = info.decodePacket(rb);
} catch (RuntimeException e) {
log.log(Level.WARNING, "got garbage; closing connection: " + toString());
throw new IOException("jrt: decode error", e);
}
ReplyHandler handler;
synchronized (this) {
handler = replyMap.remove(packet.requestId());
}
if (handler != null) {
handler.handleReply(packet);
} else {
owner.handlePacket(this, packet);
}
}
}
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
ByteBuffer wb = input.getChannelWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
if (maxInputSize > 0) {
input.shrink(maxInputSize);
}
}
public void handleReadEvent() throws IOException {
if (state == CONNECTED) {
read();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got read event in incompatible state: " + state);
}
}
private void write() throws IOException {
synchronized (this) {
queue.flush(myQueue);
}
for (int i = 0; i < WRITE_REDO; i++) {
while (output.bytes() < WRITE_SIZE) {
Packet packet = (Packet) myQueue.dequeue();
if (packet == null) {
break;
}
PacketInfo info = packet.getPacketInfo();
ByteBuffer wb = output.getWritable(info.packetLength());
owner.writePacket(info);
info.encodePacket(packet, wb);
}
ByteBuffer rb = output.getChannelReadable();
if (rb.remaining() == 0) {
break;
}
socket.write(rb);
if (rb.remaining() > 0) {
break;
}
}
int myWriteWork = 0;
if (output.bytes() > 0) {
myWriteWork++;
}
if (socket.flush() == CryptoSocket.FlushResult.NEED_WRITE) {
myWriteWork++;
}
boolean disableWrite;
synchronized (this) {
writeWork = queue.size()
+ myQueue.size()
+ myWriteWork;
disableWrite = (writeWork == 0);
}
if (disableWrite) {
disableWrite();
}
if (maxOutputSize > 0) {
output.shrink(maxOutputSize);
}
}
public void handleWriteEvent() throws IOException {
if (state == CONNECTED) {
write();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got write event in incompatible state: " + state);
}
}
public void fini() {
setState(CLOSED);
if (selectionKey != null) {
selectionKey.cancel();
}
}
public boolean isClosed() {
return (state == CLOSED);
}
public boolean hasSocket() {
return ((socket != null) && (socket.channel() != null));
}
public void closeSocket() {
if (hasSocket()) {
try {
socket.channel().socket().close();
} catch (Exception e) {
log.log(Level.WARNING, "Error closing connection", e);
}
}
}
public void setLostReason(Exception e) {
if (lostReason == null) {
lostReason = e;
}
}
public TieBreaker startRequest() {
synchronized (this) {
activeReqs++;
}
return new TieBreaker();
}
public boolean completeRequest(TieBreaker done) {
boolean signalFini = false;
synchronized (this) {
if (!done.first()) {
return false;
}
if (--activeReqs == 0 && state == CLOSED) {
signalFini = true;
}
}
if (signalFini) {
owner.sessionFini(this);
}
return true;
}
public boolean isValid() {
return (state != CLOSED);
}
public Exception getConnectionLostReason() {
return lostReason;
}
@Override
public Optional<SecurityContext> getSecurityContext() {
return Optional.ofNullable(socket)
.flatMap(CryptoSocket::getSecurityContext);
}
public boolean isClient() {
return !server;
}
public boolean isServer() {
return server;
}
public void invokeSync(Request req, double timeout) {
SingleRequestWaiter waiter = new SingleRequestWaiter();
invokeAsync(req, timeout, waiter);
waiter.waitDone();
}
public void invokeAsync(Request req, double timeout,
RequestWaiter waiter) {
if (timeout < 0.0) {
timeout = 0.0;
}
new InvocationClient(this, req, timeout, waiter).invoke();
}
public boolean invokeVoid(Request req) {
return postPacket(new RequestPacket(Packet.FLAG_NOREPLY,
allocateKey(),
req.methodName(),
req.parameters()));
}
public synchronized boolean addWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.put(watcher, watcher);
return true;
}
public synchronized boolean removeWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.remove(watcher);
return true;
}
public void close() {
parent.closeConnection(this);
}
public String toString() {
if (hasSocket()) {
return "Connection { " + socket.channel().socket() + " }";
}
return "Connection { no socket, spec " + spec + " }";
}
} | class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
private static final int READ_SIZE = 32768;
private static final int READ_REDO = 10;
private static final int WRITE_SIZE = 32768;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
private static final int CONNECTING = 1;
private static final int CONNECTED = 2;
private static final int CLOSED = 3;
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
private final Buffer input = new Buffer(READ_SIZE * 2);
private final Buffer output = new Buffer(WRITE_SIZE * 2);
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
private int activeReqs = 0;
private int writeWork = 0;
private boolean pendingHandshakeWork = false;
private final TransportThread parent;
private final Supervisor owner;
private final Spec spec;
private CryptoSocket socket;
private int readSize = READ_SIZE;
private final boolean server;
private final AtomicLong requestId = new AtomicLong(0);
private SelectionKey selectionKey;
private Exception lostReason = null;
private void setState(int state) {
if (state <= this.state) {
log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state);
return;
}
boolean live = (state == CONNECTED);
boolean down = (state == CLOSED);
boolean fini;
boolean pendingWrite;
synchronized (this) {
this.state = state;
fini = down && (activeReqs == 0);
pendingWrite = (writeWork > 0);
}
if (live) {
enableRead();
if (pendingWrite) {
enableWrite();
} else {
disableWrite();
}
owner.sessionLive(this);
}
if (down) {
for (ReplyHandler rh : replyMap.values()) {
rh.handleConnectionDown();
}
for (TargetWatcher watcher : watchers.values()) {
watcher.notifyTargetInvalid(this);
}
owner.sessionDown(this);
}
if (fini) {
owner.sessionFini(this);
}
}
public Connection(TransportThread parent, Supervisor owner,
SocketChannel channel, boolean tcpNoDelay) {
this.parent = parent;
this.owner = owner;
this.socket = parent.transport().createServerCryptoSocket(channel);
this.spec = null;
this.tcpNoDelay = tcpNoDelay;
server = true;
owner.sessionInit(this);
}
public void setMaxInputSize(int bytes) {
maxInputSize = bytes;
}
public void setMaxOutputSize(int bytes) {
maxOutputSize = bytes;
}
public TransportThread transportThread() {
return parent;
}
public int allocateKey() {
long v = requestId.getAndIncrement();
v = v*2 + (server ? 1 : 0);
int i = (int)(v & 0x7fffffff);
return i;
}
public synchronized boolean cancelReply(ReplyHandler handler) {
if (state == CLOSED) {
return false;
}
ReplyHandler stored = replyMap.remove(handler.key());
if (stored != handler) {
if (stored != null) {
replyMap.put(handler.key(), stored);
}
return false;
}
return true;
}
public boolean postPacket(Packet packet, ReplyHandler handler) {
boolean accepted = false;
boolean enableWrite = false;
synchronized (this) {
if (state <= CONNECTED) {
enableWrite = (writeWork == 0 && state == CONNECTED);
queue.enqueue(packet);
writeWork++;
accepted = true;
if (handler != null) {
replyMap.put(handler.key(), handler);
}
}
}
if (enableWrite) {
parent.enableWrite(this);
}
return accepted;
}
public boolean postPacket(Packet packet) {
return postPacket(packet, null);
}
public Connection connect() {
if (spec == null || spec.malformed()) {
setLostReason(new IllegalArgumentException("jrt: malformed or missing spec"));
return this;
}
try {
socket = parent.transport().createClientCryptoSocket(SocketChannel.open(spec.resolveAddress()), spec);
} catch (Exception e) {
setLostReason(e);
}
return this;
}
public boolean init(Selector selector) {
if (!hasSocket()) {
return false;
}
try {
socket.channel().configureBlocking(false);
socket.channel().socket().setTcpNoDelay(tcpNoDelay);
selectionKey = socket.channel().register(selector,
SelectionKey.OP_READ | SelectionKey.OP_WRITE,
this);
} catch (Exception e) {
log.log(Level.WARNING, "Error initializing connection", e);
setLostReason(e);
return false;
}
setState(CONNECTING);
return true;
}
public void enableRead() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_READ);
}
public void disableRead() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_READ);
}
public void enableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
| SelectionKey.OP_WRITE);
}
public void disableWrite() {
selectionKey.interestOps(selectionKey.interestOps()
& ~SelectionKey.OP_WRITE);
}
private void handshake() throws IOException {
if (pendingHandshakeWork) {
return;
}
switch (socket.handshake()) {
case DONE:
if (socket.getMinimumReadBufferSize() > readSize) {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
break;
case NEED_READ:
enableRead();
disableWrite();
break;
case NEED_WRITE:
disableRead();
enableWrite();
break;
case NEED_WORK:
disableRead();
disableWrite();
pendingHandshakeWork = true;
parent.transport().doHandshakeWork(this);
break;
}
}
public void doHandshakeWork() {
socket.doHandshakeWork();
}
public void handleHandshakeWorkDone() throws IOException {
if (!pendingHandshakeWork) {
throw new IllegalStateException("jrt: got unwanted handshake work done event");
}
pendingHandshakeWork = false;
if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got handshake work done event in incompatible state: " + state);
}
}
private void handlePackets() throws IOException {
ByteBuffer rb = input.getReadable();
while (true) {
PacketInfo info = PacketInfo.getPacketInfo(rb);
if (info == null || info.packetLength() > rb.remaining()) {
break;
}
owner.readPacket(info);
Packet packet;
try {
packet = info.decodePacket(rb);
} catch (RuntimeException e) {
log.log(Level.WARNING, "got garbage; closing connection: " + toString());
throw new IOException("jrt: decode error", e);
}
ReplyHandler handler;
synchronized (this) {
handler = replyMap.remove(packet.requestId());
}
if (handler != null) {
handler.handleReply(packet);
} else {
owner.handlePacket(this, packet);
}
}
}
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
ByteBuffer wb = input.getChannelWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
while (socket.drain(input.getChannelWritable(readSize)) > 0) {
handlePackets();
}
if (maxInputSize > 0) {
input.shrink(maxInputSize);
}
}
public void handleReadEvent() throws IOException {
if (state == CONNECTED) {
read();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got read event in incompatible state: " + state);
}
}
private void write() throws IOException {
synchronized (this) {
queue.flush(myQueue);
}
for (int i = 0; i < WRITE_REDO; i++) {
while (output.bytes() < WRITE_SIZE) {
Packet packet = (Packet) myQueue.dequeue();
if (packet == null) {
break;
}
PacketInfo info = packet.getPacketInfo();
ByteBuffer wb = output.getWritable(info.packetLength());
owner.writePacket(info);
info.encodePacket(packet, wb);
}
ByteBuffer rb = output.getChannelReadable();
if (rb.remaining() == 0) {
break;
}
socket.write(rb);
if (rb.remaining() > 0) {
break;
}
}
int myWriteWork = 0;
if (output.bytes() > 0) {
myWriteWork++;
}
if (socket.flush() == CryptoSocket.FlushResult.NEED_WRITE) {
myWriteWork++;
}
boolean disableWrite;
synchronized (this) {
writeWork = queue.size()
+ myQueue.size()
+ myWriteWork;
disableWrite = (writeWork == 0);
}
if (disableWrite) {
disableWrite();
}
if (maxOutputSize > 0) {
output.shrink(maxOutputSize);
}
}
public void handleWriteEvent() throws IOException {
if (state == CONNECTED) {
write();
} else if (state == CONNECTING) {
handshake();
} else {
throw new IOException("jrt: got write event in incompatible state: " + state);
}
}
public void fini() {
setState(CLOSED);
if (selectionKey != null) {
selectionKey.cancel();
}
}
public boolean isClosed() {
return (state == CLOSED);
}
public boolean hasSocket() {
return ((socket != null) && (socket.channel() != null));
}
public void closeSocket() {
if (hasSocket()) {
try {
socket.channel().socket().close();
} catch (Exception e) {
log.log(Level.WARNING, "Error closing connection", e);
}
}
}
public void setLostReason(Exception e) {
if (lostReason == null) {
lostReason = e;
}
}
public TieBreaker startRequest() {
synchronized (this) {
activeReqs++;
}
return new TieBreaker();
}
public boolean completeRequest(TieBreaker done) {
boolean signalFini = false;
synchronized (this) {
if (!done.first()) {
return false;
}
if (--activeReqs == 0 && state == CLOSED) {
signalFini = true;
}
}
if (signalFini) {
owner.sessionFini(this);
}
return true;
}
public boolean isValid() {
return (state != CLOSED);
}
public Exception getConnectionLostReason() {
return lostReason;
}
@Override
public Optional<SecurityContext> getSecurityContext() {
return Optional.ofNullable(socket)
.flatMap(CryptoSocket::getSecurityContext);
}
public boolean isClient() {
return !server;
}
public boolean isServer() {
return server;
}
public void invokeSync(Request req, double timeout) {
SingleRequestWaiter waiter = new SingleRequestWaiter();
invokeAsync(req, timeout, waiter);
waiter.waitDone();
}
public void invokeAsync(Request req, double timeout,
RequestWaiter waiter) {
if (timeout < 0.0) {
timeout = 0.0;
}
new InvocationClient(this, req, timeout, waiter).invoke();
}
public boolean invokeVoid(Request req) {
return postPacket(new RequestPacket(Packet.FLAG_NOREPLY,
allocateKey(),
req.methodName(),
req.parameters()));
}
public synchronized boolean addWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.put(watcher, watcher);
return true;
}
public synchronized boolean removeWatcher(TargetWatcher watcher) {
if (state == CLOSED) {
return false;
}
watchers.remove(watcher);
return true;
}
public void close() {
parent.closeConnection(this);
}
public String toString() {
if (hasSocket()) {
return "Connection { " + socket.channel().socket() + " }";
}
return "Connection { no socket, spec " + spec + " }";
}
} |
Verify that `min <= max`? | private Capacity(ClusterResources min, ClusterResources max, boolean required, boolean canFail, NodeType type) {
this.min = min;
this.max = max;
this.required = required;
this.canFail = canFail;
this.type = type;
} | this.min = min; | private Capacity(ClusterResources min, ClusterResources max, boolean required, boolean canFail, NodeType type) {
this.min = min;
this.max = max;
this.required = required;
this.canFail = canFail;
this.type = type;
} | class Capacity {
/** Resources should stay between these values, inclusive */
private final ClusterResources min, max;
private final boolean required;
private final boolean canFail;
private final NodeType type;
/** Returns the number of nodes requested */
@Deprecated
public int nodeCount() { return min.nodes(); }
/** Returns the number of nodes requested (across all groups), or 0 if not specified */
@Deprecated
public int nodes() { return min.nodes(); }
/** Returns the number of groups requested, or 0 if not specified */
@Deprecated
public int groups() { return min.groups(); }
/**
* The node flavor requested, or empty if no legacy flavor name has been used.
* This may be satisfied by the requested flavor or a suitable replacement.
*
* @deprecated use nodeResources instead
*/
@Deprecated
public Optional<String> flavor() {
if (nodeResources().isEmpty()) return Optional.empty();
return Optional.of(min.nodeResources().toString());
}
/** Returns the resources requested for each node, or empty to leave this decision to provisioning */
@Deprecated
public Optional<NodeResources> nodeResources() {
if (min.nodeResources() == NodeResources.unspecified) return Optional.empty();
return Optional.of(min.nodeResources());
}
public ClusterResources minResources() { return min; }
public ClusterResources maxResources() { return max; }
/** Returns whether the requested number of nodes must be met exactly for a request for this to succeed */
public boolean isRequired() { return required; }
/**
* Returns true if an exception should be thrown if the specified capacity can not be satisfied
* (to whatever policies are applied and taking required true/false into account).
* Returns false if it is preferable to still succeed with partially satisfied capacity.
*/
public boolean canFail() { return canFail; }
/**
* Returns the node type (role) requested. This is tenant nodes by default.
* If some other type is requested the node count and flavor may be ignored
* and all nodes of the requested type returned instead.
*/
public NodeType type() { return type; }
public Capacity withGroups(int groups) {
return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type);
}
@Override
public String toString() {
return (required ? "required " : "") +
(min.equals(max) ? min : "between " + min + " and " + max);
}
/** Create a non-required, failable capacity request */
public static Capacity from(ClusterResources resources) {
return from(resources, false, true);
}
public static Capacity from(ClusterResources resources, boolean required, boolean canFail) {
return from(resources, required, canFail, NodeType.tenant);
}
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail) {
return new Capacity(min, max, required, canFail, NodeType.tenant);
}
/** Create a non-required, failable capacity request */
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources) {
return fromCount(nodes, resources, false, true);
}
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources, boolean required, boolean canFail) {
return fromCount(nodes, Optional.of(resources), required, canFail);
}
@Deprecated
public static Capacity fromCount(int nodes, Optional<NodeResources> resources, boolean required, boolean canFail) {
return from(new ClusterResources(nodes, 0, resources.orElse(NodeResources.unspecified)),
required, canFail, NodeType.tenant);
}
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
return from(new ClusterResources(0, 0, NodeResources.unspecified), true, false, type);
}
private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type) {
return new Capacity(resources, resources, required, canFail, type);
}
} | class Capacity {
/** Resources should stay between these values, inclusive */
private final ClusterResources min, max;
private final boolean required;
private final boolean canFail;
private final NodeType type;
/** Returns the number of nodes requested */
@Deprecated
public int nodeCount() { return min.nodes(); }
/** Returns the number of nodes requested (across all groups), or 0 if not specified */
@Deprecated
public int nodes() { return min.nodes(); }
/** Returns the number of groups requested, or 0 if not specified */
@Deprecated
public int groups() { return min.groups(); }
/**
* The node flavor requested, or empty if no legacy flavor name has been used.
* This may be satisfied by the requested flavor or a suitable replacement.
*
* @deprecated use nodeResources instead
*/
@Deprecated
public Optional<String> flavor() {
if (nodeResources().isEmpty()) return Optional.empty();
return Optional.of(min.nodeResources().toString());
}
/** Returns the resources requested for each node, or empty to leave this decision to provisioning */
@Deprecated
public Optional<NodeResources> nodeResources() {
if (min.nodeResources() == NodeResources.unspecified) return Optional.empty();
return Optional.of(min.nodeResources());
}
public ClusterResources minResources() { return min; }
public ClusterResources maxResources() { return max; }
/** Returns whether the requested number of nodes must be met exactly for a request for this to succeed */
public boolean isRequired() { return required; }
/**
* Returns true if an exception should be thrown if the specified capacity can not be satisfied
* (to whatever policies are applied and taking required true/false into account).
* Returns false if it is preferable to still succeed with partially satisfied capacity.
*/
public boolean canFail() { return canFail; }
/**
* Returns the node type (role) requested. This is tenant nodes by default.
* If some other type is requested the node count and flavor may be ignored
* and all nodes of the requested type returned instead.
*/
public NodeType type() { return type; }
public Capacity withGroups(int groups) {
return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type);
}
@Override
public String toString() {
return (required ? "required " : "") +
(min.equals(max) ? min : "between " + min + " and " + max);
}
/** Create a non-required, failable capacity request */
public static Capacity from(ClusterResources resources) {
return from(resources, false, true);
}
public static Capacity from(ClusterResources resources, boolean required, boolean canFail) {
return from(resources, required, canFail, NodeType.tenant);
}
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail) {
return new Capacity(min, max, required, canFail, NodeType.tenant);
}
/** Create a non-required, failable capacity request */
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources) {
return fromCount(nodes, resources, false, true);
}
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources, boolean required, boolean canFail) {
return fromCount(nodes, Optional.of(resources), required, canFail);
}
@Deprecated
public static Capacity fromCount(int nodes, Optional<NodeResources> resources, boolean required, boolean canFail) {
return from(new ClusterResources(nodes, 0, resources.orElse(NodeResources.unspecified)),
required, canFail, NodeType.tenant);
}
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
return from(new ClusterResources(0, 0, NodeResources.unspecified), true, false, type);
}
private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type) {
return new Capacity(resources, resources, required, canFail, type);
}
} |
Ok, done. | private Capacity(ClusterResources min, ClusterResources max, boolean required, boolean canFail, NodeType type) {
this.min = min;
this.max = max;
this.required = required;
this.canFail = canFail;
this.type = type;
} | this.min = min; | private Capacity(ClusterResources min, ClusterResources max, boolean required, boolean canFail, NodeType type) {
this.min = min;
this.max = max;
this.required = required;
this.canFail = canFail;
this.type = type;
} | class Capacity {
/** Resources should stay between these values, inclusive */
private final ClusterResources min, max;
private final boolean required;
private final boolean canFail;
private final NodeType type;
/** Returns the number of nodes requested */
@Deprecated
public int nodeCount() { return min.nodes(); }
/** Returns the number of nodes requested (across all groups), or 0 if not specified */
@Deprecated
public int nodes() { return min.nodes(); }
/** Returns the number of groups requested, or 0 if not specified */
@Deprecated
public int groups() { return min.groups(); }
/**
* The node flavor requested, or empty if no legacy flavor name has been used.
* This may be satisfied by the requested flavor or a suitable replacement.
*
* @deprecated use nodeResources instead
*/
@Deprecated
public Optional<String> flavor() {
if (nodeResources().isEmpty()) return Optional.empty();
return Optional.of(min.nodeResources().toString());
}
/** Returns the resources requested for each node, or empty to leave this decision to provisioning */
@Deprecated
public Optional<NodeResources> nodeResources() {
if (min.nodeResources() == NodeResources.unspecified) return Optional.empty();
return Optional.of(min.nodeResources());
}
public ClusterResources minResources() { return min; }
public ClusterResources maxResources() { return max; }
/** Returns whether the requested number of nodes must be met exactly for a request for this to succeed */
public boolean isRequired() { return required; }
/**
* Returns true if an exception should be thrown if the specified capacity can not be satisfied
* (to whatever policies are applied and taking required true/false into account).
* Returns false if it is preferable to still succeed with partially satisfied capacity.
*/
public boolean canFail() { return canFail; }
/**
* Returns the node type (role) requested. This is tenant nodes by default.
* If some other type is requested the node count and flavor may be ignored
* and all nodes of the requested type returned instead.
*/
public NodeType type() { return type; }
public Capacity withGroups(int groups) {
return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type);
}
@Override
public String toString() {
return (required ? "required " : "") +
(min.equals(max) ? min : "between " + min + " and " + max);
}
/** Create a non-required, failable capacity request */
public static Capacity from(ClusterResources resources) {
return from(resources, false, true);
}
public static Capacity from(ClusterResources resources, boolean required, boolean canFail) {
return from(resources, required, canFail, NodeType.tenant);
}
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail) {
return new Capacity(min, max, required, canFail, NodeType.tenant);
}
/** Create a non-required, failable capacity request */
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources) {
return fromCount(nodes, resources, false, true);
}
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources, boolean required, boolean canFail) {
return fromCount(nodes, Optional.of(resources), required, canFail);
}
@Deprecated
public static Capacity fromCount(int nodes, Optional<NodeResources> resources, boolean required, boolean canFail) {
return from(new ClusterResources(nodes, 0, resources.orElse(NodeResources.unspecified)),
required, canFail, NodeType.tenant);
}
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
return from(new ClusterResources(0, 0, NodeResources.unspecified), true, false, type);
}
private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type) {
return new Capacity(resources, resources, required, canFail, type);
}
} | class Capacity {
/** Resources should stay between these values, inclusive */
private final ClusterResources min, max;
private final boolean required;
private final boolean canFail;
private final NodeType type;
/** Returns the number of nodes requested */
@Deprecated
public int nodeCount() { return min.nodes(); }
/** Returns the number of nodes requested (across all groups), or 0 if not specified */
@Deprecated
public int nodes() { return min.nodes(); }
/** Returns the number of groups requested, or 0 if not specified */
@Deprecated
public int groups() { return min.groups(); }
/**
* The node flavor requested, or empty if no legacy flavor name has been used.
* This may be satisfied by the requested flavor or a suitable replacement.
*
* @deprecated use nodeResources instead
*/
@Deprecated
public Optional<String> flavor() {
if (nodeResources().isEmpty()) return Optional.empty();
return Optional.of(min.nodeResources().toString());
}
/** Returns the resources requested for each node, or empty to leave this decision to provisioning */
@Deprecated
public Optional<NodeResources> nodeResources() {
if (min.nodeResources() == NodeResources.unspecified) return Optional.empty();
return Optional.of(min.nodeResources());
}
public ClusterResources minResources() { return min; }
public ClusterResources maxResources() { return max; }
/** Returns whether the requested number of nodes must be met exactly for a request for this to succeed */
public boolean isRequired() { return required; }
/**
* Returns true if an exception should be thrown if the specified capacity can not be satisfied
* (to whatever policies are applied and taking required true/false into account).
* Returns false if it is preferable to still succeed with partially satisfied capacity.
*/
public boolean canFail() { return canFail; }
/**
* Returns the node type (role) requested. This is tenant nodes by default.
* If some other type is requested the node count and flavor may be ignored
* and all nodes of the requested type returned instead.
*/
public NodeType type() { return type; }
public Capacity withGroups(int groups) {
return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type);
}
@Override
public String toString() {
return (required ? "required " : "") +
(min.equals(max) ? min : "between " + min + " and " + max);
}
/** Create a non-required, failable capacity request */
public static Capacity from(ClusterResources resources) {
return from(resources, false, true);
}
public static Capacity from(ClusterResources resources, boolean required, boolean canFail) {
return from(resources, required, canFail, NodeType.tenant);
}
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail) {
return new Capacity(min, max, required, canFail, NodeType.tenant);
}
/** Create a non-required, failable capacity request */
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources) {
return fromCount(nodes, resources, false, true);
}
@Deprecated
public static Capacity fromCount(int nodes, NodeResources resources, boolean required, boolean canFail) {
return fromCount(nodes, Optional.of(resources), required, canFail);
}
@Deprecated
public static Capacity fromCount(int nodes, Optional<NodeResources> resources, boolean required, boolean canFail) {
return from(new ClusterResources(nodes, 0, resources.orElse(NodeResources.unspecified)),
required, canFail, NodeType.tenant);
}
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
return from(new ClusterResources(0, 0, NodeResources.unspecified), true, false, type);
}
private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type) {
return new Capacity(resources, resources, required, canFail, type);
}
} |
Consider ```suggestion private List<Address> addressesFromSlime(Inspector object) { return SlimeUtils.entriesStream(object.field(containersKey)) .map(elem -> new Address(elem.field(containerHostnameKey).asString())) .collect(Collectors.toList()); } ``` | private List<Address> addressesFromSlime(Inspector object) {
Inspector addressesField = object.field(containersKey);
if (addressesField.children() == 0)
return List.of();
List<Address> addresses = new ArrayList<>(addressesField.children());
addressesField.traverse((ArrayTraverser) (i, elem) ->
addresses.add(new Address(elem.field(containerHostnameKey).asString())));
return addresses;
} | } | private List<Address> addressesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> new Address(elem.field(containerHostnameKey).asString()))
.collect(Collectors.toList());
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToKey = "exclusiveTo";
private static final String switchHostnameKey = "switchHostname";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private final Cache<Long, Node> cache;
public NodeSerializer(NodeFlavors flavors, long cacheSize) {
this.flavors = flavors;
this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize).recordStats().build();
}
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/** Returns cache statistics for this serializer */
public CacheStats cacheStats() {
var stats = cache.stats();
return new CacheStats(stats.hitRate(), stats.evictionCount(), cache.size());
}
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey));
toSlime(node.ipConfig().pool().getIpSet(), object.setArray(ipAddressPoolKey));
toSlime(node.ipConfig().pool().getAddressList(), object);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveTo().ifPresent(applicationId -> object.setString(exclusiveToKey, applicationId.serializedForm()));
}
/**
 * Serializes the flavor. A configured flavor is stored by name, plus any disk
 * override under resourcesKey; a flavor defined purely by resources stores the
 * full resource spec instead. flavorFromSlime mirrors this branching on read.
 */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Serializes a node's allocation: requested resources, owner, cluster membership, generations and network ports. */
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
// The image repo is stored without its tag
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Appends each history event as one object in the given array. */
private void toSlime(History history, Cursor array) {
history.events().forEach(event -> toSlime(event, array.addObject()));
}
/** Writes a single history event: its serialized type, timestamp (epoch millis) and acting agent. */
private void toSlime(History.Event event, Cursor object) {
String serializedType = toString(event.type());
String serializedAgent = toString(event.agent());
object.setString(historyEventTypeKey, serializedType);
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, serializedAgent);
}
/** Writes the given IP addresses to the array, sorted in natural IP order. */
private void toSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.stream()
           .map(IP::parse)
           .sorted(IP.NATURAL_ORDER)
           .map(IP::asString)
           .forEach(array::addString);
}
/** Writes container addresses, if any, as an array of objects each holding a hostname. */
private void toSlime(List<Address> addresses, Cursor object) {
if (addresses.isEmpty()) return;
Cursor addressCursor = object.setArray(containersKey);
for (Address address : addresses)
    addressCursor.addObject().setString(containerHostnameKey, address.hostname());
}
/**
 * Deserializes a node from JSON, memoizing the result.
 *
 * The cache key is a 64-bit SipHash of the state name plus the raw JSON bytes,
 * so repeated reads of identical (state, data) pairs skip re-parsing.
 *
 * @param state the state to assign to the node (state is not part of the serialized form)
 * @param data  raw JSON bytes of the node
 */
public Node fromJson(Node.State state, byte[] data) {
var key = Hashing.sipHash24().newHasher()
.putString(state.name(), StandardCharsets.UTF_8)
.putBytes(data).hash()
.asLong();
try {
return cache.get(key, () -> nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()));
} catch (ExecutionException e) {
// cache.get wraps the loader's failure in an ExecutionException; rethrow the
// actual cause instead of double-wrapping the wrapper itself
throw new UncheckedExecutionException(e.getCause());
}
}
/** Builds a node from its serialized form. Reads the same fields toSlime writes; state is supplied by the caller. */
private Node nodeFromSlime(Node.State state, Inspector object) {
// Flavor is read first because the allocation's requested-resources fallback needs it
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
addressesFromSlime(object)),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavor,
statusFromSlime(object),
state,
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
modelNameFromSlime(object),
reservedToFromSlime(object.field(reservedToKey)),
exclusiveToFromSlime(object.field(exclusiveToKey)),
switchHostnameFromSlime(object.field(switchHostnameKey)));
}
/** Reads the node's status; optional fields missing from the serialized form become empty Optionals. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
instantFromSlime(object.field(firmwareCheckKey)));
}
/** Reads the optional switch hostname. */
private Optional<String> switchHostnameFromSlime(Inspector field) {
if (!field.valid()) return Optional.empty();
return Optional.of(field.asString());
}
/**
 * Reads the flavor, mirroring the write side: a flavorKey means a configured
 * flavor (resolved against the injected NodeFlavors, optionally with a disk
 * override); otherwise the flavor is reconstructed from the serialized resources.
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if (!resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
else {
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
}
}
/**
 * Reads the optional allocation. When the serialized form carries no requested
 * resources, the node's assigned resources are used as the fallback.
 */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id from its tenant/application/instance parts. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Reads the node's event history. Events that deserialize to null are skipped (currently none do — unknown types throw). */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
/** Reads one history event, or null if its type maps to null in eventTypeFromString. */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given fields. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
/**
 * Reads an allocated node's cluster membership. The wanted Vespa version is
 * required: a descriptive exception is thrown when it is missing, rather than
 * the bare NoSuchElementException an unchecked Optional.get() would produce.
 */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
Version wantedVespaVersion = versionFromSlime(object.field(wantedVespaVersionKey))
.orElseThrow(() -> new IllegalArgumentException("Missing required field '" + wantedVespaVersionKey + "'"));
return ClusterMembership.from(object.field(serviceIdKey).asString(),
wantedVespaVersion,
containerImageRepoFromSlime(object.field(wantedContainerImageRepoKey)));
}
/** Reads an optional version field. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Reads an optional container image repo; an empty string is treated as absent. */
private Optional<DockerImage> containerImageRepoFromSlime(Inspector object) {
if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an optional container image. Unlike the repo variant, an empty string is not treated as absent. */
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an optional instant stored as epoch milliseconds. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Reads the optional parent hostname. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
Inspector parent = object.field(parentHostnameKey);
return parent.valid() ? Optional.of(parent.asString()) : Optional.empty();
}
/** Reads the string array under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (index, item) -> builder.add(item.asString()));
return builder.build();
}
/** Reads the optional hardware model name. */
private Optional<String> modelNameFromSlime(Inspector object) {
Inspector modelName = object.field(modelNameKey);
return modelName.valid() ? Optional.of(modelName.asString()) : Optional.empty();
}
/** Reads the optional tenant this node is reserved to; the field must be a string when present. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/** Reads the optional application this node is exclusive to; the field must be a string when present. */
private Optional<ApplicationId> exclusiveToFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'exclusiveTo' to be a string but is " + object);
return Optional.of(ApplicationId.fromSerializedForm(object.asString()));
}
/**
 * Maps a serialized event type name to its enum value.
 * NOTE(review): this previously documented returning null for event types that
 * should be ignored (and historyFromSlime still null-checks), but this
 * implementation throws for every unknown name — the null path is currently unused.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
case "provisioned" : return History.Event.Type.provisioned;
case "deprovisioned" : return History.Event.Type.deprovisioned;
case "readied" : return History.Event.Type.readied;
case "reserved" : return History.Event.Type.reserved;
case "activated" : return History.Event.Type.activated;
case "wantToRetire": return History.Event.Type.wantToRetire;
case "retired" : return History.Event.Type.retired;
case "deactivated" : return History.Event.Type.deactivated;
case "parked" : return History.Event.Type.parked;
case "failed" : return History.Event.Type.failed;
case "deallocated" : return History.Event.Type.deallocated;
case "down" : return History.Event.Type.down;
case "requested" : return History.Event.Type.requested;
case "rebooted" : return History.Event.Type.rebooted;
case "osUpgraded" : return History.Event.Type.osUpgraded;
case "firmwareVerified" : return History.Event.Type.firmwareVerified;
case "breakfixed" : return History.Event.Type.breakfixed;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serializes an event type. Must remain the exact inverse of eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
case provisioned : return "provisioned";
case deprovisioned : return "deprovisioned";
case readied : return "readied";
case reserved : return "reserved";
case activated : return "activated";
case wantToRetire: return "wantToRetire";
case retired : return "retired";
case deactivated : return "deactivated";
case parked : return "parked";
case failed : return "failed";
case deallocated : return "deallocated";
case down : return "down";
case requested: return "requested";
case rebooted: return "rebooted";
case osUpgraded: return "osUpgraded";
case firmwareVerified: return "firmwareVerified";
case breakfixed: return "breakfixed";
}
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Maps a serialized agent name to its enum value; an unknown name indicates corrupt or newer data and throws. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
case "operator" : return Agent.operator;
case "application" : return Agent.application;
case "system" : return Agent.system;
case "DirtyExpirer" : return Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
case "FailedExpirer" : return Agent.FailedExpirer;
case "InactiveExpirer" : return Agent.InactiveExpirer;
case "NodeFailer" : return Agent.NodeFailer;
case "NodeHealthTracker" : return Agent.NodeHealthTracker;
case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
case "Rebalancer" : return Agent.Rebalancer;
case "ReservationExpirer" : return Agent.ReservationExpirer;
case "RetiringUpgrader" : return Agent.RetiringUpgrader;
case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer;
case "SwitchRebalancer": return Agent.SwitchRebalancer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serializes an agent. Must remain the exact inverse of eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
case operator : return "operator";
case application : return "application";
case system : return "system";
case DirtyExpirer : return "DirtyExpirer";
case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
case FailedExpirer : return "FailedExpirer";
case InactiveExpirer : return "InactiveExpirer";
case NodeFailer : return "NodeFailer";
case NodeHealthTracker: return "NodeHealthTracker";
case ProvisionedExpirer : return "ProvisionedExpirer";
case Rebalancer : return "Rebalancer";
case ReservationExpirer : return "ReservationExpirer";
case RetiringUpgrader: return "RetiringUpgrader";
case SpareCapacityMaintainer: return "SpareCapacityMaintainer";
case SwitchRebalancer: return "SwitchRebalancer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Maps a serialized node type name to its enum value. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
case "tenant": return NodeType.tenant;
case "host": return NodeType.host;
case "proxy": return NodeType.proxy;
case "proxyhost": return NodeType.proxyhost;
case "config": return NodeType.config;
case "confighost": return NodeType.confighost;
case "controller": return NodeType.controller;
case "controllerhost": return NodeType.controllerhost;
case "devhost": return NodeType.devhost;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serializes a node type. Must remain the exact inverse of nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
case tenant: return "tenant";
case host: return "host";
case proxy: return "proxy";
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
case controller: return "controller";
case controllerhost: return "controllerhost";
case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
// ---- Serialized field names. These are part of the persisted schema:
// ---- never change an existing value, only add new keys.
// Node fields
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId"; // NOTE(review): presumably a historical name kept for persisted-data compatibility — confirm
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToKey = "exclusiveTo";
private static final String switchHostnameKey = "switchHostname";
// Node resource fields
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
// Allocation fields
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
// History event fields
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
// Network port fields
private static final String networkPortsKey = "networkPorts";
// Deserialization cache, keyed on a hash of (state, raw json) — see fromJson
private final Cache<Long, Node> cache;
/** Creates a serializer resolving flavors against the given NodeFlavors, with a bounded deserialization cache. */
public NodeSerializer(NodeFlavors flavors, long cacheSize) {
this.flavors = flavors;
this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize).recordStats().build();
}
/** Serializes the given node to JSON bytes. */
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/** Returns cache statistics for this serializer */
public CacheStats cacheStats() {
var stats = cache.stats();
return new CacheStats(stats.hitRate(), stats.evictionCount(), cache.size());
}
/** Writes all persisted fields of a node; node state is kept outside the serialized form. */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey));
toSlime(node.ipConfig().pool().getIpSet(), object.setArray(ipAddressPoolKey));
toSlime(node.ipConfig().pool().getAddressList(), object);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveTo().ifPresent(applicationId -> object.setString(exclusiveToKey, applicationId.serializedForm()));
}
/** Writes a configured flavor by name (plus disk override), or a resources-only flavor as a full resource spec. */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Writes a node's allocation: owner, membership, generations, ports. */
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Writes each history event as one object in the array. */
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
toSlime(event, array.addObject());
}
/** Writes one history event: type, epoch-milli timestamp, agent. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Writes IP addresses sorted in natural IP order. */
private void toSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
}
/** Writes container addresses, if any, as objects each holding a hostname. */
private void toSlime(List<Address> addresses, Cursor object) {
if (addresses.isEmpty()) return;
Cursor addressCursor = object.setArray(containersKey);
addresses.forEach(address -> {
addressCursor.addObject().setString(containerHostnameKey, address.hostname());
});
}
/**
 * Deserializes a node from JSON, memoizing by a SipHash of (state name, raw bytes)
 * so identical inputs skip re-parsing.
 *
 * @param state the state to assign to the node (state is not part of the serialized form)
 * @param data  raw JSON bytes of the node
 */
public Node fromJson(Node.State state, byte[] data) {
var key = Hashing.sipHash24().newHasher()
.putString(state.name(), StandardCharsets.UTF_8)
.putBytes(data).hash()
.asLong();
try {
return cache.get(key, () -> nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()));
} catch (ExecutionException e) {
// Rethrow the loader's actual failure instead of double-wrapping the ExecutionException
throw new UncheckedExecutionException(e.getCause());
}
}
/** Builds a node from its serialized form; state is supplied by the caller. */
private Node nodeFromSlime(Node.State state, Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
addressesFromSlime(object)),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavor,
statusFromSlime(object),
state,
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
modelNameFromSlime(object),
reservedToFromSlime(object.field(reservedToKey)),
exclusiveToFromSlime(object.field(exclusiveToKey)),
switchHostnameFromSlime(object.field(switchHostnameKey)));
}
/** Reads the node's status; missing optional fields become empty Optionals. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
instantFromSlime(object.field(firmwareCheckKey)));
}
/** Reads the optional switch hostname. */
private Optional<String> switchHostnameFromSlime(Inspector field) {
if (!field.valid()) return Optional.empty();
return Optional.of(field.asString());
}
/** Reads the flavor: configured flavor by name (plus disk override) or resources-only flavor. */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if (!resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
else {
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
}
}
/** Reads the optional allocation, falling back to assigned resources when none were recorded as requested. */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Reads the node's event history; null events (ignorable types) are skipped. */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
/** Reads one history event, or null if its type maps to null. */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
/** Reads an allocated node's cluster membership. */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
// NOTE(review): unchecked .get() throws a bare NoSuchElementException when
// wantedVespaVersion is missing; consider orElseThrow with a descriptive message
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageRepoFromSlime(object.field(wantedContainerImageRepoKey)));
}
/** Reads an optional version field. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Reads an optional container image repo; empty string counts as absent. */
private Optional<DockerImage> containerImageRepoFromSlime(Inspector object) {
if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an optional container image; unlike the repo variant, empty string is not treated as absent. */
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Reads an optional instant stored as epoch milliseconds. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Reads the optional parent hostname. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
if (object.field(parentHostnameKey).valid())
return Optional.of(object.field(parentHostnameKey).asString());
else
return Optional.empty();
}
/** Reads the string array under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
/** Reads the optional hardware model name. */
private Optional<String> modelNameFromSlime(Inspector object) {
if (object.field(modelNameKey).valid()) {
return Optional.of(object.field(modelNameKey).asString());
}
return Optional.empty();
}
/** Reads the optional tenant this node is reserved to; must be a string when present. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/** Reads the optional application this node is exclusive to; must be a string when present. */
private Optional<ApplicationId> exclusiveToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'exclusiveTo' to be a string but is " + object);
return Optional.of(ApplicationId.fromSerializedForm(object.asString()));
}
/**
 * Maps a serialized event type name to its enum value.
 * NOTE(review): documented as returning null for ignorable types (the caller
 * null-checks), but this implementation throws for unknown names — the null
 * path is currently unused.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
case "provisioned" : return History.Event.Type.provisioned;
case "deprovisioned" : return History.Event.Type.deprovisioned;
case "readied" : return History.Event.Type.readied;
case "reserved" : return History.Event.Type.reserved;
case "activated" : return History.Event.Type.activated;
case "wantToRetire": return History.Event.Type.wantToRetire;
case "retired" : return History.Event.Type.retired;
case "deactivated" : return History.Event.Type.deactivated;
case "parked" : return History.Event.Type.parked;
case "failed" : return History.Event.Type.failed;
case "deallocated" : return History.Event.Type.deallocated;
case "down" : return History.Event.Type.down;
case "requested" : return History.Event.Type.requested;
case "rebooted" : return History.Event.Type.rebooted;
case "osUpgraded" : return History.Event.Type.osUpgraded;
case "firmwareVerified" : return History.Event.Type.firmwareVerified;
case "breakfixed" : return History.Event.Type.breakfixed;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serializes an event type; exact inverse of eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
case provisioned : return "provisioned";
case deprovisioned : return "deprovisioned";
case readied : return "readied";
case reserved : return "reserved";
case activated : return "activated";
case wantToRetire: return "wantToRetire";
case retired : return "retired";
case deactivated : return "deactivated";
case parked : return "parked";
case failed : return "failed";
case deallocated : return "deallocated";
case down : return "down";
case requested: return "requested";
case rebooted: return "rebooted";
case osUpgraded: return "osUpgraded";
case firmwareVerified: return "firmwareVerified";
case breakfixed: return "breakfixed";
}
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Maps a serialized agent name to its enum value; unknown names throw. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
case "operator" : return Agent.operator;
case "application" : return Agent.application;
case "system" : return Agent.system;
case "DirtyExpirer" : return Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
case "FailedExpirer" : return Agent.FailedExpirer;
case "InactiveExpirer" : return Agent.InactiveExpirer;
case "NodeFailer" : return Agent.NodeFailer;
case "NodeHealthTracker" : return Agent.NodeHealthTracker;
case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
case "Rebalancer" : return Agent.Rebalancer;
case "ReservationExpirer" : return Agent.ReservationExpirer;
case "RetiringUpgrader" : return Agent.RetiringUpgrader;
case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer;
case "SwitchRebalancer": return Agent.SwitchRebalancer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serializes an agent; exact inverse of eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
case operator : return "operator";
case application : return "application";
case system : return "system";
case DirtyExpirer : return "DirtyExpirer";
case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
case FailedExpirer : return "FailedExpirer";
case InactiveExpirer : return "InactiveExpirer";
case NodeFailer : return "NodeFailer";
case NodeHealthTracker: return "NodeHealthTracker";
case ProvisionedExpirer : return "ProvisionedExpirer";
case Rebalancer : return "Rebalancer";
case ReservationExpirer : return "ReservationExpirer";
case RetiringUpgrader: return "RetiringUpgrader";
case SpareCapacityMaintainer: return "SpareCapacityMaintainer";
case SwitchRebalancer: return "SwitchRebalancer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Maps a serialized node type name to its enum value. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
case "tenant": return NodeType.tenant;
case "host": return NodeType.host;
case "proxy": return NodeType.proxy;
case "proxyhost": return NodeType.proxyhost;
case "config": return NodeType.config;
case "confighost": return NodeType.confighost;
case "controller": return NodeType.controller;
case "controllerhost": return NodeType.controllerhost;
case "devhost": return NodeType.devhost;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serializes a node type; exact inverse of nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
case tenant: return "tenant";
case host: return "host";
case proxy: return "proxy";
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
case controller: return "controller";
case controllerhost: return "controllerhost";
case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
} |
Consider `orElseGet` to avoid having to do `NodeRepostiory::getNodes` every time. ```suggestion .orElseGet(() -> currentResources(applicationId, clusterId, requested) ``` | private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity requested) {
try (Mutex lock = nodeRepository.lock(applicationId)) {
Application application = nodeRepository.applications().get(applicationId, true);
application.setClusterLimits(clusterId, requested.minResources(), requested.maxResources(), lock);
return application.cluster(clusterId).targetResources()
.orElse(currentResources(applicationId, clusterId, requested)
.orElse(requested.minResources()));
}
} | .orElse(currentResources(applicationId, clusterId, requested) | private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity requested) {
try (Mutex lock = nodeRepository.lock(applicationId)) {
Application application = nodeRepository.applications().get(applicationId, true);
application = application.withClusterLimits(clusterId, requested.minResources(), requested.maxResources());
nodeRepository.applications().set(applicationId, application, lock);
return application.cluster(clusterId).targetResources()
.orElseGet(() -> currentResources(applicationId, clusterId, requested)
.orElse(requested.minResources()));
}
} | class NodeRepositoryProvisioner implements Provisioner {
private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName());
private static final int SPARE_CAPACITY_PROD = 0;
private static final int SPARE_CAPACITY_NONPROD = 0;
private final NodeRepository nodeRepository;
private final CapacityPolicies capacityPolicies;
private final Zone zone;
private final Preparer preparer;
private final Activator activator;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
int getSpareCapacityProd() {
return SPARE_CAPACITY_PROD;
}
@Inject
public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone,
ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone);
this.zone = zone;
this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
provisionServiceProvider.getHostProvisioner(),
provisionServiceProvider.getHostResourcesCalculator(),
flagSource,
loadBalancerProvisioner);
this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
}
/**
* Returns a list of nodes in the prepared or active state, matching the given constraints.
* The nodes are ordered by increasing index number.
*/
@Deprecated
@Override
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requestedCapacity,
int wantedGroups, ProvisionLogger logger) {
return prepare(application, cluster, requestedCapacity.withGroups(wantedGroups), logger);
}
/**
* Returns a list of nodes in the prepared or active state, matching the given constraints.
* The nodes are ordered by increasing index number.
*/
@Override
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
ProvisionLogger logger) {
log.log(zone.system().isCd() ? Level.INFO : LogLevel.DEBUG,
() -> "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
if ( ! hasQuota(application, requested.maxResources().nodes()))
throw new IllegalArgumentException(requested + " requested for " + cluster +
". Max value exceeds your quota. Resolve this at https:
int groups;
NodeResources resources;
NodeSpec nodeSpec;
if ( requested.type() == NodeType.tenant) {
ClusterResources target = decideTargetResources(application, cluster.id(), requested);
int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application);
resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster);
boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive());
groups = Math.min(target.groups(), nodeCount);
nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail());
logIfDownscaled(target.nodes(), nodeCount, cluster, logger);
}
else {
groups = 1;
resources = requested.minResources().nodeResources();
nodeSpec = NodeSpec.from(requested.type());
}
return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources);
}
@Override
public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
validate(hosts);
activator.activate(application, hosts, transaction);
}
@Override
public void restart(ApplicationId application, HostFilter filter) {
nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter)));
}
@Override
public void remove(NestedTransaction transaction, ApplicationId application) {
nodeRepository.deactivate(application, transaction);
loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(application, transaction));
}
/**
* Returns the target cluster resources, a value between the min and max in the requested capacity,
* and updates the application store with the received min and max,
*/
/** Returns the current resources of this cluster, if it'1s already depoyed and inside the requested limits */
private Optional<ClusterResources> currentResources(ApplicationId applicationId,
ClusterSpec.Id clusterId,
Capacity requested) {
List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active))
.cluster(clusterId).not().retired().asList();
if (nodes.size() < 1) return Optional.empty();
long groups = nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
var resources = new ClusterResources(nodes.size(), (int)groups, nodes.get(0).flavor().resources());
if ( ! resources.isWithin(requested.minResources(), requested.maxResources())) return Optional.empty();
return Optional.of(resources);
}
private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) {
if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes)
logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster +
", downscaling to " + actualNodes + " nodes in " + zone.environment());
}
private boolean hasQuota(ApplicationId application, int requestedNodes) {
if ( ! this.zone.system().isPublic()) return true;
if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60;
if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75;
return requestedNodes <= 5;
}
private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) {
nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index()));
List<HostSpec> hosts = new ArrayList<>(nodes.size());
for (Node node : nodes) {
log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " - " + node.flavor());
Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new);
hosts.add(new HostSpec(node.hostname(),
List.of(),
Optional.of(node.flavor()),
Optional.of(nodeAllocation.membership()),
node.status().vespaVersion(),
nodeAllocation.networkPorts(),
requestedResources == NodeResources.unspecified ? Optional.empty() : Optional.of(requestedResources),
node.status().dockerImage().map(DockerImage::repository)));
if (nodeAllocation.networkPorts().isPresent()) {
log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " has port allocations");
}
}
return hosts;
}
private void validate(Collection<HostSpec> hosts) {
for (HostSpec host : hosts) {
if (host.membership().isEmpty())
throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host);
if (host.membership().get().cluster().group().isEmpty())
throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host);
}
}
} | class NodeRepositoryProvisioner implements Provisioner {
private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName());
private static final int SPARE_CAPACITY_PROD = 0;
private static final int SPARE_CAPACITY_NONPROD = 0;
private final NodeRepository nodeRepository;
private final CapacityPolicies capacityPolicies;
private final Zone zone;
private final Preparer preparer;
private final Activator activator;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
int getSpareCapacityProd() {
return SPARE_CAPACITY_PROD;
}
@Inject
public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone,
ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone);
this.zone = zone;
this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
provisionServiceProvider.getHostProvisioner(),
provisionServiceProvider.getHostResourcesCalculator(),
flagSource,
loadBalancerProvisioner);
this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
}
/**
* Returns a list of nodes in the prepared or active state, matching the given constraints.
* The nodes are ordered by increasing index number.
*/
@Deprecated
@Override
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requestedCapacity,
int wantedGroups, ProvisionLogger logger) {
return prepare(application, cluster, requestedCapacity.withGroups(wantedGroups), logger);
}
/**
* Returns a list of nodes in the prepared or active state, matching the given constraints.
* The nodes are ordered by increasing index number.
*/
@Override
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
ProvisionLogger logger) {
log.log(zone.system().isCd() ? Level.INFO : LogLevel.DEBUG,
() -> "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
if ( ! hasQuota(application, requested.maxResources().nodes()))
throw new IllegalArgumentException(requested + " requested for " + cluster +
". Max value exceeds your quota. Resolve this at https:
int groups;
NodeResources resources;
NodeSpec nodeSpec;
if ( requested.type() == NodeType.tenant) {
ClusterResources target = decideTargetResources(application, cluster.id(), requested);
int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application);
resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster);
boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive());
groups = Math.min(target.groups(), nodeCount);
nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail());
logIfDownscaled(target.nodes(), nodeCount, cluster, logger);
}
else {
groups = 1;
resources = requested.minResources().nodeResources();
nodeSpec = NodeSpec.from(requested.type());
}
return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources);
}
@Override
public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
validate(hosts);
activator.activate(application, hosts, transaction);
}
@Override
public void restart(ApplicationId application, HostFilter filter) {
nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter)));
}
@Override
public void remove(NestedTransaction transaction, ApplicationId application) {
nodeRepository.deactivate(application, transaction);
loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(application, transaction));
}
/**
* Returns the target cluster resources, a value between the min and max in the requested capacity,
* and updates the application store with the received min and max,
*/
/** Returns the current resources of this cluster, if it's already deployed and inside the requested limits */
private Optional<ClusterResources> currentResources(ApplicationId applicationId,
ClusterSpec.Id clusterId,
Capacity requested) {
List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active))
.cluster(clusterId).not().retired().asList();
if (nodes.size() < 1) return Optional.empty();
long groups = nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
var resources = new ClusterResources(nodes.size(), (int)groups, nodes.get(0).flavor().resources());
if ( ! resources.isWithin(requested.minResources(), requested.maxResources())) return Optional.empty();
return Optional.of(resources);
}
private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) {
if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes)
logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster +
", downscaling to " + actualNodes + " nodes in " + zone.environment());
}
private boolean hasQuota(ApplicationId application, int requestedNodes) {
if ( ! this.zone.system().isPublic()) return true;
if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60;
if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75;
return requestedNodes <= 5;
}
private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) {
nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index()));
List<HostSpec> hosts = new ArrayList<>(nodes.size());
for (Node node : nodes) {
log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " - " + node.flavor());
Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new);
hosts.add(new HostSpec(node.hostname(),
List.of(),
Optional.of(node.flavor()),
Optional.of(nodeAllocation.membership()),
node.status().vespaVersion(),
nodeAllocation.networkPorts(),
requestedResources == NodeResources.unspecified ? Optional.empty() : Optional.of(requestedResources),
node.status().dockerImage().map(DockerImage::repository)));
if (nodeAllocation.networkPorts().isPresent()) {
log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " has port allocations");
}
}
return hosts;
}
private void validate(Collection<HostSpec> hosts) {
for (HostSpec host : hosts) {
if (host.membership().isEmpty())
throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host);
if (host.membership().get().cluster().group().isEmpty())
throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host);
}
}
} |
```suggestion if (closed) throw new IllegalStateException(this + " is closed"); ``` | public Optional<Mutex> applicationLock() {
if (closed) throw new IllegalStateException(this + "is closed");
return lock;
} | if (closed) throw new IllegalStateException(this + "is closed"); | public Optional<Mutex> applicationLock() {
if (closed) throw new IllegalStateException(this + " is closed");
return lock;
} | class MaintenanceDeployment implements Closeable {
private static final Logger log = Logger.getLogger(MaintenanceDeployment.class.getName());
private final ApplicationId application;
private final Optional<Mutex> lock;
private final Optional<Deployment> deployment;
private boolean closed = false;
public MaintenanceDeployment(ApplicationId application, Deployer deployer, NodeRepository nodeRepository) {
this.application = application;
Optional<Mutex> lock = tryLock(application, nodeRepository);
try {
deployment = tryDeployment(lock, application, deployer, nodeRepository);
this.lock = lock;
lock = Optional.empty();
} finally {
lock.ifPresent(Mutex::close);
}
}
/** Return whether this is - as yet - functional and can be used to carry out the deployment */
public boolean isValid() {
return deployment.isPresent();
}
/**
* Returns the application lock held by this, or empty if it is not held.
*
* @throws IllegalStateException id this is called when closed
*/
public boolean prepare() {
return doStep(() -> deployment.get().prepare());
}
public boolean activate() {
return doStep(() -> deployment.get().activate());
}
private boolean doStep(Runnable action) {
if (closed) throw new IllegalStateException(this + "' is closed");
if ( ! isValid()) return false;
try {
action.run();
return true;
} catch (TransientException e) {
log.log(LogLevel.INFO, "Failed to maintenance deploy " + application + " with a transient error: " +
Exceptions.toMessageString(e));
return false;
} catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception on maintenance deploy of " + application, e);
return false;
}
}
private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) {
try {
return Optional.of(nodeRepository.lock(application, Duration.ofSeconds(1)));
}
catch (ApplicationLockException e) {
return Optional.empty();
}
}
private Optional<Deployment> tryDeployment(Optional<Mutex> lock,
ApplicationId application,
Deployer deployer,
NodeRepository nodeRepository) {
if (lock.isEmpty()) return Optional.empty();
if (nodeRepository.getNodes(application, Node.State.active).isEmpty()) return Optional.empty();
return deployer.deployFromLocalActive(application);
}
@Override
public void close() {
lock.ifPresent(l -> l.close());
closed = true;
}
@Override
public String toString() {
return "deployment of " + application;
}
} | class MaintenanceDeployment implements Closeable {
private static final Logger log = Logger.getLogger(MaintenanceDeployment.class.getName());
private final ApplicationId application;
private final Optional<Mutex> lock;
private final Optional<Deployment> deployment;
private boolean closed = false;
public MaintenanceDeployment(ApplicationId application, Deployer deployer, NodeRepository nodeRepository) {
this.application = application;
Optional<Mutex> lock = tryLock(application, nodeRepository);
try {
deployment = tryDeployment(lock, application, deployer, nodeRepository);
this.lock = lock;
lock = Optional.empty();
} finally {
lock.ifPresent(Mutex::close);
}
}
/** Return whether this is - as yet - functional and can be used to carry out the deployment */
public boolean isValid() {
return deployment.isPresent();
}
/**
* Returns the application lock held by this, or empty if it is not held.
*
* @throws IllegalStateException id this is called when closed
*/
public boolean prepare() {
return doStep(() -> deployment.get().prepare());
}
public boolean activate() {
return doStep(() -> deployment.get().activate());
}
private boolean doStep(Runnable action) {
if (closed) throw new IllegalStateException(this + "' is closed");
if ( ! isValid()) return false;
try {
action.run();
return true;
} catch (TransientException e) {
log.log(LogLevel.INFO, "Failed to maintenance deploy " + application + " with a transient error: " +
Exceptions.toMessageString(e));
return false;
} catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception on maintenance deploy of " + application, e);
return false;
}
}
private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) {
try {
return Optional.of(nodeRepository.lock(application, Duration.ofSeconds(1)));
}
catch (ApplicationLockException e) {
return Optional.empty();
}
}
private Optional<Deployment> tryDeployment(Optional<Mutex> lock,
ApplicationId application,
Deployer deployer,
NodeRepository nodeRepository) {
if (lock.isEmpty()) return Optional.empty();
if (nodeRepository.getNodes(application, Node.State.active).isEmpty()) return Optional.empty();
return deployer.deployFromLocalActive(application);
}
@Override
public void close() {
lock.ifPresent(l -> l.close());
closed = true;
}
@Override
public String toString() {
return "deployment of " + application;
}
} |
Nitpicking: I guess "chunk" is now more than just "chunk". | public void getConfig(ProtonConfig.Summary.Log.Builder log) {
if (maxFileSize!=null) log.maxfilesize(maxFileSize);
if (minFileSizeFactor!=null) log.minfilesizefactor(minFileSizeFactor);
if (chunk != null) {
chunk.getConfig(log.chunk);
chunk.getConfig(log.compact);
}
} | chunk.getConfig(log.compact); | public void getConfig(ProtonConfig.Summary.Log.Builder log) {
if (maxFileSize!=null) log.maxfilesize(maxFileSize);
if (minFileSizeFactor!=null) log.minfilesizefactor(minFileSizeFactor);
if (chunk != null) {
chunk.getConfig(log.chunk);
chunk.getConfig(log.compact);
}
} | class LogStore {
public Long maxFileSize = null;
public Component chunk = null;
public Double minFileSizeFactor = null;
} | class LogStore {
public Long maxFileSize = null;
public Component chunk = null;
public Double minFileSizeFactor = null;
} |
Nitpicking: Inconsistent spacing. | public void getConfig(ProtonConfig.Summary.Log.Compact.Builder compact) {
if (compression != null) {
compression.getConfig(compact.compression);
}
} | compression.getConfig(compact.compression); | public void getConfig(ProtonConfig.Summary.Log.Compact.Builder compact) {
if (compression != null) {
compression.getConfig(compact.compression);
}
} | class Component {
public Long maxSize = null;
public Double maxSizePercent = null;
public Long initialEntries = null;
public Compression compression = null;
private final boolean outputInt;
public Component() {
this.outputInt = false;
}
public Component(boolean outputInt) {
this.outputInt = outputInt;
}
public void getConfig(ProtonConfig.Summary.Cache.Builder cache) {
if (outputInt) {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize.intValue());
if (initialEntries!=null) cache.initialentries(initialEntries.intValue());
} else {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize);
if (initialEntries!=null) cache.initialentries(initialEntries);
}
if (compression != null) {
compression.getConfig(cache.compression);
}
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Builder chunk) {
if (outputInt) {
if (maxSize!=null) chunk.maxbytes(maxSize.intValue());
} else {
throw new IllegalStateException("Fix this, chunk does not have long types");
}
if (compression != null) {
compression.getConfig(chunk.compression);
}
}
} | class Component {
public Long maxSize = null;
public Double maxSizePercent = null;
public Long initialEntries = null;
public Compression compression = null;
private final boolean outputInt;
public Component() {
this.outputInt = false;
}
public Component(boolean outputInt) {
this.outputInt = outputInt;
}
public void getConfig(ProtonConfig.Summary.Cache.Builder cache) {
if (outputInt) {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize.intValue());
if (initialEntries!=null) cache.initialentries(initialEntries.intValue());
} else {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize);
if (initialEntries!=null) cache.initialentries(initialEntries);
}
if (compression != null) {
compression.getConfig(cache.compression);
}
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Builder chunk) {
if (outputInt) {
if (maxSize!=null) chunk.maxbytes(maxSize.intValue());
} else {
throw new IllegalStateException("Fix this, chunk does not have long types");
}
if (compression != null) {
compression.getConfig(chunk.compression);
}
}
} |
Nitpicking: Spacing is (was already) inconsistent between the 3 getConfig() methods here. | public void getConfig(ProtonConfig.Summary.Log.Compact.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
} | if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name)); | public void getConfig(ProtonConfig.Summary.Log.Compact.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
} | class Compression {
public enum Type {
NONE("NONE"),
ZSTD("ZSTD"),
LZ4("LZ4");
public final String name;
Type(String name) {
this.name = name;
}
public static Type fromString(String name) {
for (Type type : Type.values()) {
if (toLowerCase(name).equals(toLowerCase(type.name))) {
return type;
}
}
return NONE;
}
}
public Type type = null;
public Integer level = null;
public void getConfig(ProtonConfig.Summary.Cache.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Cache.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Chunk.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
} | class Compression {
public enum Type {
NONE("NONE"),
ZSTD("ZSTD"),
LZ4("LZ4");
public final String name;
Type(String name) {
this.name = name;
}
public static Type fromString(String name) {
for (Type type : Type.values()) {
if (toLowerCase(name).equals(toLowerCase(type.name))) {
return type;
}
}
return NONE;
}
}
public Type type = null;
public Integer level = null;
public void getConfig(ProtonConfig.Summary.Cache.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Cache.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Chunk.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
} |
Yes, but it is under the chunk tag in services so keeping that link here too. | public void getConfig(ProtonConfig.Summary.Log.Builder log) {
if (maxFileSize!=null) log.maxfilesize(maxFileSize);
if (minFileSizeFactor!=null) log.minfilesizefactor(minFileSizeFactor);
if (chunk != null) {
chunk.getConfig(log.chunk);
chunk.getConfig(log.compact);
}
} | chunk.getConfig(log.compact); | public void getConfig(ProtonConfig.Summary.Log.Builder log) {
if (maxFileSize!=null) log.maxfilesize(maxFileSize);
if (minFileSizeFactor!=null) log.minfilesizefactor(minFileSizeFactor);
if (chunk != null) {
chunk.getConfig(log.chunk);
chunk.getConfig(log.compact);
}
} | class LogStore {
public Long maxFileSize = null;
public Component chunk = null;
public Double minFileSizeFactor = null;
} | class LogStore {
public Long maxFileSize = null;
public Component chunk = null;
public Double minFileSizeFactor = null;
} |
Unified | public void getConfig(ProtonConfig.Summary.Log.Compact.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
} | if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name)); | public void getConfig(ProtonConfig.Summary.Log.Compact.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Compact.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
} | class Compression {
public enum Type {
NONE("NONE"),
ZSTD("ZSTD"),
LZ4("LZ4");
public final String name;
Type(String name) {
this.name = name;
}
public static Type fromString(String name) {
for (Type type : Type.values()) {
if (toLowerCase(name).equals(toLowerCase(type.name))) {
return type;
}
}
return NONE;
}
}
public Type type = null;
public Integer level = null;
public void getConfig(ProtonConfig.Summary.Cache.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Cache.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Chunk.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
} | class Compression {
public enum Type {
NONE("NONE"),
ZSTD("ZSTD"),
LZ4("LZ4");
public final String name;
Type(String name) {
this.name = name;
}
public static Type fromString(String name) {
for (Type type : Type.values()) {
if (toLowerCase(name).equals(toLowerCase(type.name))) {
return type;
}
}
return NONE;
}
}
public Type type = null;
public Integer level = null;
public void getConfig(ProtonConfig.Summary.Cache.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Cache.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Compression.Builder compression) {
if (type != null) compression.type(ProtonConfig.Summary.Log.Chunk.Compression.Type.Enum.valueOf(type.name));
if (level != null) compression.level(level);
}
} |
Unified | public void getConfig(ProtonConfig.Summary.Log.Compact.Builder compact) {
if (compression != null) {
compression.getConfig(compact.compression);
}
} | compression.getConfig(compact.compression); | public void getConfig(ProtonConfig.Summary.Log.Compact.Builder compact) {
if (compression != null) {
compression.getConfig(compact.compression);
}
} | class Component {
public Long maxSize = null;
public Double maxSizePercent = null;
public Long initialEntries = null;
public Compression compression = null;
private final boolean outputInt;
public Component() {
this.outputInt = false;
}
public Component(boolean outputInt) {
this.outputInt = outputInt;
}
public void getConfig(ProtonConfig.Summary.Cache.Builder cache) {
if (outputInt) {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize.intValue());
if (initialEntries!=null) cache.initialentries(initialEntries.intValue());
} else {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize);
if (initialEntries!=null) cache.initialentries(initialEntries);
}
if (compression != null) {
compression.getConfig(cache.compression);
}
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Builder chunk) {
if (outputInt) {
if (maxSize!=null) chunk.maxbytes(maxSize.intValue());
} else {
throw new IllegalStateException("Fix this, chunk does not have long types");
}
if (compression != null) {
compression.getConfig(chunk.compression);
}
}
} | class Component {
public Long maxSize = null;
public Double maxSizePercent = null;
public Long initialEntries = null;
public Compression compression = null;
private final boolean outputInt;
public Component() {
this.outputInt = false;
}
public Component(boolean outputInt) {
this.outputInt = outputInt;
}
public void getConfig(ProtonConfig.Summary.Cache.Builder cache) {
if (outputInt) {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize.intValue());
if (initialEntries!=null) cache.initialentries(initialEntries.intValue());
} else {
if (maxSizePercent !=null) cache.maxbytes(-maxSizePercent.longValue());
if (maxSize!=null) cache.maxbytes(maxSize);
if (initialEntries!=null) cache.initialentries(initialEntries);
}
if (compression != null) {
compression.getConfig(cache.compression);
}
}
public void getConfig(ProtonConfig.Summary.Log.Chunk.Builder chunk) {
if (outputInt) {
if (maxSize!=null) chunk.maxbytes(maxSize.intValue());
} else {
throw new IllegalStateException("Fix this, chunk does not have long types");
}
if (compression != null) {
compression.getConfig(chunk.compression);
}
}
} |
Consider reducing sleep to `Thread.sleep(1)` to minimize runtime of test. | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | Thread.sleep(10); | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | class DeconstructorTest {
// Instance under test; recreated before every test by init().
public static Deconstructor deconstructor;
@Before
public void init() {
    // false: see Deconstructor's constructor for what this flag disables.
    deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
    TestAbstractComponent abstractComponent = new TestAbstractComponent();
    deconstructor.deconstruct(singleton(abstractComponent), emptyList());
    // Deconstruction of components happens asynchronously: poll for up to
    // 12000 * 10 ms (~2 minutes) before giving up.
    for (int attempts = 0; attempts < 12000 && ! abstractComponent.destructed; attempts++) {
        Thread.sleep(10);
    }
    assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
    var provider = new TestProvider();
    deconstructor.deconstruct(singleton(provider), emptyList());
    // Asserted immediately — unlike components, no polling loop is used here.
    assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
    var sharedResource = new TestSharedResource();
    deconstructor.deconstruct(singleton(sharedResource), emptyList());
    // Asserted immediately — release is expected before deconstruct returns.
    assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
    // volatile: set from the deconstructor's background thread and polled by
    // the test thread (see require_abstract_component_destructed) — without
    // it the update is not guaranteed to become visible.
    volatile boolean destructed = false;
    @Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
    // volatile for safe cross-thread visibility, matching the other test doubles.
    volatile boolean destructed = false;
    @Override public Void get() { return null; }
    @Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
    // volatile for safe cross-thread visibility, matching the other test doubles.
    volatile boolean released = false;
    @Override public ResourceReference refer() { return null; }
    @Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
    // volatile: uninstall() runs on the deconstructor's thread while the test
    // thread polls this flag — volatile guarantees the write becomes visible.
    volatile boolean uninstalled = false;
    @Override public void uninstall() {
        uninstalled = true;
    }
}
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
volatile boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
volatile boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} |
This is millis, so 1/100 of a sec. Is that too much? | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | Thread.sleep(10); | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
volatile boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
volatile boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} |
Done | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | Thread.sleep(10); | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
volatile boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
volatile boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} |
oh.. too late | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | Thread.sleep(10); | public void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
deconstructor.deconstruct(emptyList(), singleton(bundle));
int cnt = 0;
while (! bundle.uninstalled && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(bundle.uninstalled);
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} | class DeconstructorTest {
public static Deconstructor deconstructor;
@Before
public void init() {
deconstructor = new Deconstructor(false);
}
@Test
public void require_abstract_component_destructed() throws InterruptedException {
TestAbstractComponent abstractComponent = new TestAbstractComponent();
deconstructor.deconstruct(singleton(abstractComponent), emptyList());
int cnt = 0;
while (! abstractComponent.destructed && (cnt++ < 12000)) {
Thread.sleep(10);
}
assertTrue(abstractComponent.destructed);
}
@Test
public void require_provider_destructed() {
TestProvider provider = new TestProvider();
deconstructor.deconstruct(singleton(provider), emptyList());
assertTrue(provider.destructed);
}
@Test
public void require_shared_resource_released() {
TestSharedResource sharedResource = new TestSharedResource();
deconstructor.deconstruct(singleton(sharedResource), emptyList());
assertTrue(sharedResource.released);
}
@Test
private static class TestAbstractComponent extends AbstractComponent {
boolean destructed = false;
@Override public void deconstruct() { destructed = true; }
}
private static class TestProvider implements Provider<Void> {
volatile boolean destructed = false;
@Override public Void get() { return null; }
@Override public void deconstruct() { destructed = true; }
}
private static class TestSharedResource implements SharedResource {
volatile boolean released = false;
@Override public ResourceReference refer() { return null; }
@Override public void release() { released = true; }
}
private static class UninstallableMockBundle extends MockBundle {
boolean uninstalled = false;
@Override public void uninstall() {
uninstalled = true;
}
}
} |
This should probably not be a system out here. Use the warnlog function. But, isn't this information already in the line below as the entire tuple is output to warnlog? If so, I don't see the point of adding additional output here. If not, rather change the exception out below to explicitly add the docid. No configuration should be necessary for this. | public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
if (printIdOnError) {
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println("Error occur when processing document with docID: " +docId);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | System.out.println("Error occur when processing document with docID: " +docId); | public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
if (verbose) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | class VespaDocumentOperation extends EvalFunc<String> {
/** The Vespa document operations this UDF can emit. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** The lower-cased operation name as used in the JSON feed. */
    @Override
    public String toString() {
        // Locale.ROOT keeps the mapping locale-independent: with the default
        // locale, Turkish lower-casing would turn "ID" into "ıd".
        return name().toLowerCase(java.util.Locale.ROOT);
    }

    /**
     * Returns the operation matching the given name, ignoring case.
     * @throws IllegalArgumentException if no operation matches
     */
    public static Operation fromString(String text) {
        for (Operation op : values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns whether the given name (case-insensitive) is a known operation. */
    public static boolean valid(String text) {
        for (Operation op : values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Keys recognized in the UDF's "key=value" constructor parameters.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_PRINT_DOCID_WHEN_ERROR = "print-docid-when-error";
// Comma-separated field-name lists controlling how fields are serialized.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// JSON field names of the partial-update operations.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// NOTE(review): static, but assigned from the instance constructor below —
// concurrent instances with different settings would race. Consider making
// this an instance field.
private static boolean printIdOnError;
// Property key -> partial-update operation applied per map entry
// (see writePartialUpdateAndRemoveMap).
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Property key -> partial-update operation for whole fields
// (see writePartialUpdate); covers both tensor and map field lists.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;      // document id template; may reference tuple fields
private final Operation operation;  // operation to emit; defaults to "put"
private final Properties properties;
private PigStatusReporter statusReporter;
/**
 * Creates the UDF from "key=value" parameter strings; see the PROPERTY_*
 * constants above for the recognized keys.
 */
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        // Pre-register both counters (at 0) so they always appear in job output.
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
    // NOTE(review): assigns a static field from an instance constructor.
    printIdOnError = Boolean.parseBoolean(properties.getProperty(PROPERTY_PRINT_DOCID_WHEN_ERROR, "false"));
}
@Override
/**
* Create a JSON Vespa document operation given the supplied fields,
* operation and document id template.
*
* @param op Operation (put, remove, update)
* @param docId Document id
* @param fields Fields to put in document operation
* @return A valid JSON Vespa document operation
* @throws IOException if writing the JSON document operation fails
*/
/**
 * Creates a single JSON Vespa document operation for the given operation,
 * document id and fields. Returns null when there is nothing to emit.
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    // Nothing to emit without an operation, an id, and at least one field.
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId); // e.g. {"put": "<docId>", ...}
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // "create": true lets an update create the document if it is missing.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // The condition template may reference tuple fields; expand it first.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) { // a remove carries no field payload
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode them as UTF-8 explicitly —
    // the no-arg toString() uses the platform charset and mangles non-ASCII
    // content on non-UTF-8 platforms.
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update operation ("assign", "add" or "remove") mapped
 * to the first property in operationMap whose comma-separated field list
 * contains the given field name, or null if no property lists it.
 * Entries are trimmed and matched case-insensitively, consistent with the
 * other field-list lookups in this class (previously this lookup alone was
 * untrimmed and case-sensitive, so "a, b" failed to match "b").
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String fieldList = properties.getProperty(entry.getKey()); // single lookup
        if (fieldList == null) {
            continue;
        }
        for (String field : fieldList.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
/**
 * Writes one named field to the generator, honoring exclude-fields
 * configuration. Map fields configured for per-entry update/remove are
 * handled specially; otherwise, top-level fields of an update operation are
 * wrapped in a partial-update object ({"assign": ...} etc).
 */
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    if (shouldWriteField(name, properties, depth)) {
        // Fields under update-map-fields / remove-map-fields get per-entry
        // treatment rather than a single field assignment.
        String operation = getPartialOperation(mapPartialOperationMap, name, properties);
        if (operation != null) {
            writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
        } else {
            g.writeFieldName(name);
            if (shouldWritePartialUpdate(op, depth)) {
                writePartialUpdate(value, type, g, name, properties, schema, op, depth);
            } else {
                writeValue(value, type, g, name, properties, schema, op, depth);
            }
        }
    }
}
/**
 * Writes per-entry partial updates for a map field represented as a bag of
 * (key, value) tuples, addressing each entry as "field{key}". For removes,
 * each entry becomes {"remove": 0}; otherwise each entry becomes its own
 * partial-update object.
 */
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    schema = (schema != null) ? schema.getField(0).schema : null; // bag -> tuple schema
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) { // expect (key, value) pairs; skip anything else
            continue;
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                // The value is irrelevant for a remove; 0 is a placeholder.
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
/**
 * Serializes a single value of the given Pig type to JSON. Maps may be
 * written as tensors (per configuration), tuples may be unwrapped or written
 * as objects, and bags may be written as objects keyed by the first element
 * of each (key, value) tuple (per bag-as-map-fields).
 */
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            break; // unknown types are silently dropped
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            g.writeNumber(((DateTime) value).getMillis()); // epoch millis
            break;
        case DataType.BYTEARRAY:
            // Binary data is transported as base64 text.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                // Plain JSON object: recurse into each entry as a named field.
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // Single-element tuples of configured fields are written unwrapped.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null; // bag -> tuple schema
            if (shouldWriteBagAsMap(name, properties)) {
                // Interpret the bag as (key, value) pairs and write an object.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue; // skip entries that are not (key, value)
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// Only top-level fields (depth 1) of an update operation are wrapped in
// partial-update objects; nested values are written plainly.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return depth == 1 && op == Operation.UPDATE;
}
/**
 * Wraps the value in a partial-update object, e.g. {"assign": <value>},
 * using the operation configured for this field or "assign" by default.
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    g.writeFieldName(operation != null ? operation : PARTIAL_UPDATE_ASSIGN);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * Returns whether a tuple should be wrapped in a JSON array. Single-element
 * tuples of fields listed in simple-array-fields (or "*") are unwrapped.
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    return Arrays.stream(simpleArrayFields.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Returns whether a tuple field should be serialized as a JSON object
 * instead of an array: true when the field is listed (or covered by "*") in
 * update-map-fields or simple-object-fields.
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (addBagAsMapFields == null && simpleObjectFields == null) {
        return false;
    }
    // Both lists use the same matching rules; check them in the same order
    // as before (update-map-fields first).
    for (String fieldList : new String[]{addBagAsMapFields, simpleObjectFields}) {
        if (fieldList == null) {
            continue;
        }
        if (fieldList.equals("*")) {
            return true;
        }
        for (String field : fieldList.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// Decides whether a bag of (key, value) tuples should be rendered as a JSON
// object keyed by the first tuple element, per the "bag-as-map-fields"
// property ("*" matches every field).
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String fieldList = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (fieldList == null) {
        return false;
    }
    if (fieldList.equals("*")) {
        return true;
    }
    for (String field : fieldList.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Decides whether a map field should be serialized as a Vespa tensor: true
// when the field is listed in "create-tensor-fields", "update-tensor-fields"
// or "remove-tensor-fields".
// NOTE(review): unlike the other field matchers, "*" is NOT treated as a
// wildcard here — confirm that is intentional.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorFieldLists = {
            properties.getProperty(CREATE_TENSOR_FIELDS),
            properties.getProperty(UPDATE_TENSOR_FIELDS),
            properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String fieldList : tensorFieldLists) {
        if (fieldList == null) {
            continue;
        }
        for (String field : fieldList.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when the field is configured (via "remove-tensor-fields") to be
// serialized as a tensor-cell removal rather than a tensor-cell assignment.
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String fieldList = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (fieldList == null) {
        return false;
    }
    for (String field : fieldList.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// A top-level field (depth 1) is skipped when listed in "exclude-fields";
// nested fields and unconfigured runs are always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String field : excluded.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Serializes a map of {cell-address -> numeric value} as a Vespa tensor
// "cells" array. Keys are comma-separated "dim:label" pairs (e.g. "x:0,y:1");
// values must parse as doubles (NumberFormatException otherwise).
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
// Each comma-separated component contributes one dimension to the address.
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
// Empty components (e.g. trailing commas) are silently skipped.
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Serializes a map whose keys are comma-separated "dim:label" cell addresses
 * as a Vespa tensor-remove update: {"addresses": [{dim: label}, ...]}.
 * Map values are ignored; only the addresses matter for removal.
 *
 * @throws IllegalArgumentException if an address component is not "dim:label"
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Skip empty components BEFORE opening the JSON object: the
            // previous code called writeStartObject() first and then
            // 'continue'd, leaving an unclosed object and malformed JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders a throwable's full stack trace into a String. StringWriter.close()
// cannot actually fail, but the try-with-resources keeps the compiler happy;
// an (impossible) IOException is rethrown unchecked.
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
         PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Emits a warning both through Pig's aggregated-warning channel (job counters)
// and to stderr, so it is also visible in plain task logs.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/**
 * The supported Vespa document operation types. The string form used in the
 * JSON output and the "operation" property is the lower-case enum name.
 */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    @Override
    public String toString() {
        // Use a fixed locale: with the default locale (e.g. Turkish), "ID"
        // would lower-case to "\u0131d" and no longer round-trip via fromString.
        return super.toString().toLowerCase(java.util.Locale.ROOT);
    }

    /**
     * Parses the textual operation name (case-insensitive).
     *
     * @throws IllegalArgumentException if the text names no known operation
     */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true if the text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property keys accepted as constructor arguments.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
// Field-list properties controlling how individual fields are serialized.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update verbs.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps a field-list property name to the partial-update verb applied to
// map-typed fields (see writePartialUpdateAndRemoveMap).
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps a field-list property name to the verb used when wrapping a top-level
// update value (see writePartialUpdate).
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Per-instance configuration parsed from the constructor params.
private final boolean verbose;
private final String template;
private final Operation operation;
private final Properties properties;
// May be null outside a Pig task context.
private PigStatusReporter statusReporter;
// Constructs the UDF from parameter strings (presumably "key=value" pairs
// parsed by VespaConfiguration.loadProperties — confirm the format there).
// Registers the ok/failed counters up front so they appear even when zero.
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
// Defaults to "put"; fromString throws on an unknown operation name.
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
verbose = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * <p>The stray {@code @Override} that preceded this Javadoc has been removed:
 * this is a static factory, overrides nothing, and the annotation does not
 * compile on it (it belonged on {@code exec}).
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties serialization options (field lists, create-if-non-existent, ...)
 * @param schema Pig schema of the input, used to resolve nested field schemas
 * @return a valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode them as UTF-8 instead of the
    // platform default charset (plain out.toString() is platform-dependent).
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update verb ("assign", "add", "remove") configured for
 * the given field, or null when the field is not listed under any of the
 * property names in operationMap.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured == null) {
            continue;
        }
        // Trim and compare case-insensitively, consistent with how every other
        // field-list property is matched; the previous exact List.contains()
        // match silently missed entries written with spaces after the comma.
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
// Writes a single named field. Honors exclude-fields filtering; fields listed
// in update-/remove-map-fields are expanded into per-key partial updates, and
// top-level fields of an "update" operation are wrapped in a partial-update
// object. Everything else is written as a plain value.
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
// Emits per-key partial updates for a map field represented as a bag of
// (key, value) tuples: each entry becomes a field named "name{key}". For
// remove-map-fields the value is the literal {"remove": 0}; otherwise the
// value is wrapped via writePartialUpdate.
// NOTE(review): entries whose value is not a tuple are silently skipped, and
// the schema is assumed to be bag(tuple(key, value)) — confirm with callers.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
// Serializes a single Pig value as JSON, dispatching on the Pig DataType.
// Scalars map directly; bytearrays are Base64-encoded; maps may become Vespa
// tensors; tuples become arrays or objects and bags become arrays or maps
// depending on the configured field-list properties. depth is incremented for
// each nested field so exclude-fields / partial-update logic can distinguish
// top-level fields.
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
// Unknown types are silently dropped.
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
// Written as epoch milliseconds.
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
// Raw bytes are transported as Base64 text.
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
// Tensor-configured fields get the Vespa cells/addresses representation;
// ordinary maps recurse field by field.
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
// Re-enter as a MAP using the tuple's schema-derived field names.
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
// Single-element tuples of "simple-array-fields" skip the array wrapper.
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
// bag(tuple(key, value)) is rendered as a JSON object keyed on the
// first tuple element; entries of the wrong arity are skipped.
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
// True only for top-level fields (depth 1) of an "update" operation.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
// Wraps a field value in a partial-update object, e.g. {"assign": <value>};
// the verb comes from the configured properties, defaulting to "assign".
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
// Whether a tuple needs the JSON array wrapper; single-element tuples listed
// in "simple-array-fields" (or "*") are unwrapped.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
// Whether a tuple field is rendered as a JSON object (map): true when listed
// in "update-map-fields" or "simple-object-fields" ("*" matches all).
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
// NOTE(review): this local actually holds UPDATE_MAP_FIELDS, not
// bag-as-map-fields — the name is misleading.
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
// Whether a bag of (key, value) tuples is rendered as a JSON object, per the
// "bag-as-map-fields" property ("*" matches all fields).
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
// Whether a map field is serialized as a Vespa tensor: true when listed in
// create-/update-/remove-tensor-fields.
// NOTE(review): unlike the other matchers, "*" is not a wildcard here —
// confirm intended.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
// True when the field is configured (via "remove-tensor-fields") as a
// tensor-cell removal rather than an assignment.
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
// Top-level fields (depth 1) listed in "exclude-fields" are skipped; all
// other fields are always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
// Serializes a {cell-address -> value} map as a Vespa tensor "cells" array;
// keys are comma-separated "dim:label" pairs, values must parse as doubles.
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
// Empty components (e.g. trailing commas) are skipped.
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Serializes a map whose keys are comma-separated "dim:label" cell addresses
 * as a Vespa tensor-remove update: {"addresses": [{dim: label}, ...]}.
 * Map values are ignored; only the addresses matter for removal.
 *
 * @throws IllegalArgumentException if an address component is not "dim:label"
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Skip empty components BEFORE opening the JSON object: the
            // previous code called writeStartObject() first and then
            // 'continue'd, leaving an unclosed object and malformed JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders a throwable's full stack trace into a String; the (practically
// impossible) close failure is rethrown unchecked.
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
// Emits a warning both to Pig's aggregated-warning channel and to stderr.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
Makes sense. BTW, if we want to always print the docID out, the System.out.println() would be at line 151, before the create function is called. Is it acceptable to just print it out, or should we make it a property just like in this PR? Thanks! | public String exec(Tuple tuple) throws IOException {
// Converts one input tuple into a Vespa document-operation JSON string.
// Returns null (bumping the "failed" counter / warning) on any problem, so a
// bad record never kills the job.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
// Signal liveness to the task tracker on long-running rows.
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
// Expand the configured docid template against the tuple's fields.
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
if (printIdOnError) {
// NOTE(review): this re-derives the docId inside the error path; if the
// original failure happened in this very derivation, the secondary
// exception escapes the catch block — consider guarding it.
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println("Error occur when processing document with docID: " +docId);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | System.out.println("Error occur when processing document with docID: " +docId); | public String exec(Tuple tuple) throws IOException {
// Converts one input tuple into a Vespa document-operation JSON string.
// Returns null (bumping the "failed" counter / warning) on any problem, so a
// bad record never kills the job.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
// Signal liveness to the task tracker on long-running rows.
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
// Expand the configured docid template against the tuple's fields.
String docId = TupleTools.toString(fields, template);
// Optional tracing of every processed document id.
if (verbose) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | class VespaDocumentOperation extends EvalFunc<String> {
/**
 * The supported Vespa document operation types. The string form used in the
 * JSON output and the "operation" property is the lower-case enum name.
 */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    @Override
    public String toString() {
        // Use a fixed locale: with the default locale (e.g. Turkish), "ID"
        // would lower-case to "\u0131d" and no longer round-trip via fromString.
        return super.toString().toLowerCase(java.util.Locale.ROOT);
    }

    /**
     * Parses the textual operation name (case-insensitive).
     *
     * @throws IllegalArgumentException if the text names no known operation
     */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true if the text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property keys accepted as constructor arguments.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_PRINT_DOCID_WHEN_ERROR = "print-docid-when-error";
// Field-list properties controlling how individual fields are serialized.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update verbs.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Instance flag (was 'static'): it is assigned from per-instance constructor
// parameters, so keeping it static would let one UDF instance silently
// overwrite another's configuration.
private final boolean printIdOnError;
// Maps a field-list property name to the partial-update verb applied to
// map-typed fields (see writePartialUpdateAndRemoveMap).
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps a field-list property name to the verb used when wrapping a top-level
// update value (see writePartialUpdate).
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Per-instance configuration parsed from the constructor params.
private final String template;
private final Operation operation;
private final Properties properties;
// May be null outside a Pig task context.
private PigStatusReporter statusReporter;
// Constructs the UDF from parameter strings (presumably "key=value" pairs
// parsed by VespaConfiguration.loadProperties — confirm the format there).
// Registers the ok/failed counters up front so they appear even when zero.
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
// Defaults to "put"; fromString throws on an unknown operation name.
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
printIdOnError = Boolean.parseBoolean(properties.getProperty(PROPERTY_PRINT_DOCID_WHEN_ERROR, "false"));
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * <p>The stray {@code @Override} that preceded this Javadoc has been removed:
 * this is a static factory, overrides nothing, and the annotation does not
 * compile on it (it belonged on {@code exec}).
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @param properties serialization options (field lists, create-if-non-existent, ...)
 * @param schema Pig schema of the input, used to resolve nested field schemas
 * @return a valid JSON Vespa document operation, or null when op, docId or fields are missing
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode them as UTF-8 instead of the
    // platform default charset (plain out.toString() is platform-dependent).
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update verb ("assign", "add", "remove") configured for
 * the given field, or null when the field is not listed under any of the
 * property names in operationMap.
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured == null) {
            continue;
        }
        // Trim and compare case-insensitively, consistent with how every other
        // field-list property is matched; the previous exact List.contains()
        // match silently missed entries written with spaces after the comma.
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
// Writes a single named field. Honors exclude-fields filtering; fields listed
// in update-/remove-map-fields are expanded into per-key partial updates, and
// top-level fields of an "update" operation are wrapped in a partial-update
// object. Everything else is written as a plain value.
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
// Emits per-key partial updates for a map field represented as a bag of
// (key, value) tuples: each entry becomes a field named "name{key}". For
// remove-map-fields the value is the literal {"remove": 0}; otherwise the
// value is wrapped via writePartialUpdate.
// NOTE(review): entries whose value is not a tuple are silently skipped, and
// the schema is assumed to be bag(tuple(key, value)) — confirm with callers.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
/**
 * Serializes a single Pig value to JSON, dispatching on its Pig DataType.
 * Scalars map to the corresponding JSON primitives; byte arrays are Base64
 * encoded; maps, tuples and bags recurse via writeField/writeValue and may be
 * rendered as tensors, objects or arrays depending on the configured
 * properties.
 *
 * NOTE(review): for DataType.UNKNOWN nothing is written, although the caller
 * has typically already emitted a field name — presumably unknown types never
 * occur in practice; confirm before relying on this.
 */
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
// Datetimes are serialized as epoch milliseconds.
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
// Raw bytes are not representable in JSON; Base64-encode them.
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
// Maps listed in the tensor-related properties are rendered in Vespa's
// tensor JSON form instead of as a plain object.
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
// Render the tuple as a JSON object keyed by its schema field names.
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
// Single-element tuples of "simple-array-fields" are written bare,
// without the enclosing JSON array.
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
// Descend from the bag schema to the schema of its tuples.
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
// Bags of (key, value) tuples can be rendered as a JSON object;
// tuples of any other arity are silently skipped.
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
/**
 * A partial-update wrapper is only emitted for top-level fields (depth 1)
 * of an update operation.
 */
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    if (op != Operation.UPDATE) {
        return false;
    }
    return depth == 1;
}
/**
 * Wraps a field value in a partial-update object: {"assign": value} by
 * default, or the verb configured for this field (e.g. "add", "remove")
 * via the tensor/map update properties.
 */
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    String verb = getPartialOperation(partialOperationMap, name, properties);
    g.writeStartObject();
    g.writeFieldName(verb != null ? verb : PARTIAL_UPDATE_ASSIGN);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
/**
 * Decides whether a tuple is serialized inside a JSON array. Single-element
 * tuples whose field is listed in "simple-array-fields" (or all of them when
 * that property is "*") are written bare, without the array wrapper.
 */
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if ("*".equals(configured)) {
        return false;
    }
    return Arrays.stream(configured.split(","))
            .noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Returns true when a tuple-valued field should be rendered as a JSON object
 * (keyed by schema field names) instead of an array: the field is listed in,
 * or covered by a "*" in, either "update-map-fields" or
 * "simple-object-fields".
 */
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // Checked in the same order as before: update-map-fields first.
    String[] candidates = {
            properties.getProperty(UPDATE_MAP_FIELDS),
            properties.getProperty(SIMPLE_OBJECT_FIELDS)
    };
    for (String configured : candidates) {
        if (configured == null) {
            continue;
        }
        if ("*".equals(configured)) {
            return true;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Returns true when a bag of (key, value) tuples should be rendered as a
 * JSON object: the field is listed in "bag-as-map-fields", or that property
 * is "*".
 */
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    return "*".equals(configured)
            || Arrays.stream(configured.split(",")).anyMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Returns true when a map-valued field should be serialized as a Vespa
 * tensor, i.e. when the field name appears in any of the tensor-related
 * field lists ("create-tensor-fields", "update-tensor-fields",
 * "remove-tensor-fields"). Note that unlike the other field-list matchers,
 * "*" is deliberately not supported here (unchanged behavior).
 *
 * @param map        the field value; unused, kept for signature compatibility
 * @param name       the field name to test
 * @param properties UDF configuration properties
 * @return true if the field is configured as a tensor field
 */
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // The three tensor property lists all use identical matching; the
    // previous version duplicated this loop three times.
    String[] tensorProperties = { CREATE_TENSOR_FIELDS, UPDATE_TENSOR_FIELDS, REMOVE_TENSOR_FIELDS };
    for (String property : tensorProperties) {
        String configured = properties.getProperty(property);
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Returns true when a tensor field is configured for cell removal, i.e.
 * listed in the "remove-tensor-fields" property.
 */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    return Arrays.stream(configured.split(",")).anyMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Returns false when a top-level field (depth == 1) is listed in the
 * "exclude-fields" property; all other fields, and all nested fields, are
 * written.
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    return Arrays.stream(excluded.split(",")).noneMatch(field -> field.trim().equalsIgnoreCase(name));
}
/**
 * Writes a map of cell-address -> numeric value as a Vespa tensor "cells"
 * array. Map keys are comma-separated "dim:label" segments, e.g. "x:0,y:1";
 * map values must parse as doubles.
 *
 * @throws IllegalArgumentException if an address segment is not "dim:label"
 *                                  with non-empty parts
 * @throws NumberFormatException    if a cell value is not a parseable double
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
// Each comma-separated segment contributes one dimension to the address.
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
// Tolerate empty segments (e.g. trailing commas).
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Writes the "addresses" array of a tensor cell-remove update: one JSON
 * object per cell address, mapping dimension name to label. Map keys are
 * comma-separated "dim:label" segments; empty segments are skipped.
 *
 * Bug fix: previously writeStartObject() was called before the empty-segment
 * check, so an empty segment hit "continue" with the object still open,
 * producing unbalanced JSON. The object is now opened only after validation.
 *
 * @throws IllegalArgumentException if a segment is not "dim:label" with
 *                                  non-empty parts
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String key = entry.getKey().toString();
        for (String dimension : key.split(",")) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // skip before opening any object, keeping JSON balanced
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/**
 * Renders a throwable's full stack trace as a string, exactly as
 * Throwable#printStackTrace would print it.
 *
 * @param throwable the throwable to render
 * @return the stack trace text
 */
private static String getStackTraceAsString(Throwable throwable) {
    StringWriter buffer = new StringWriter();
    try (PrintWriter writer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(writer);
    }
    return buffer.toString();
}
// Logs a warning both through Pig's aggregated-warning channel (surfaces in
// job counters via EvalFunc#warn) and to stderr so it is visible in the task
// logs immediately.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} | class VespaDocumentOperation extends EvalFunc<String> {
/**
 * The Vespa document operation types this UDF can emit. The lower-cased enum
 * name (see toString()) doubles as the JSON field name and as the value
 * accepted by the "operation" property.
 */
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
// Lower-case form is used in JSON output and property parsing.
return super.toString().toLowerCase();
}
/**
 * Parses an operation name, case-insensitively.
 *
 * @throws IllegalArgumentException if the text matches no operation
 */
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
// Like fromString, but reports validity instead of throwing.
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
// Top-level UDF options, passed as "key=value" constructor parameters.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
// Field-list options: comma-separated field names (some accept "*") that
// select alternative JSON renderings for specific fields.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Vespa partial-update verbs used in update operations.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps a map-field property label to the partial-update verb applied to
// individual map entries (see writePartialUpdateAndRemoveMap).
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps a tensor/map property label to the partial-update verb wrapping the
// whole field value (see writePartialUpdate).
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Configuration parsed once in the constructor.
private final boolean verbose;
private final String template;
private final Operation operation;
private final Properties properties;
// May be null outside a running Pig job (e.g. in unit tests).
private PigStatusReporter statusReporter;
/**
 * Creates the UDF from "key=value" parameter strings (parsed by
 * VespaConfiguration.loadProperties). Recognized keys include "docid"
 * (document id template), "operation" (defaults to "put") and "verbose".
 */
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
// Pre-register both counters at zero so they appear in job statistics
// even when no documents are processed.
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
verbose = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
/**
 * Creates a JSON Vespa document operation given the supplied fields,
 * operation and document id.
 *
 * Fixes: (1) removed a stray "@Override" annotation that preceded this
 * static method — static methods cannot override, so it could not compile;
 * (2) the output bytes are written as UTF-8, so they are now also decoded as
 * UTF-8 instead of with the platform default charset.
 *
 * @param op         operation (put, remove, update); null yields null
 * @param docId      document id; null or empty yields null
 * @param fields     fields to put in the document operation; empty yields null
 * @param properties UDF configuration controlling serialization details
 * @param schema     Pig schema of the fields; may be null
 * @return a valid JSON Vespa document operation, or null on missing input
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // Decode explicitly as UTF-8: the generator wrote UTF-8 bytes, while the
    // no-argument toString() would use the platform default charset.
    return out.toString(java.nio.charset.StandardCharsets.UTF_8.name());
}
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
for (String label : operationMap.keySet()) {
if (properties.getProperty(label) != null) {
String[] p = properties.getProperty(label).split(",");
if (Arrays.asList(p).contains(name)) {
return operationMap.get(label);
}
}
}
return null;
}
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean isRemoveTensor(String name, Properties properties) {
if (properties == null) {
return false;
}
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (removeTensorFields == null) {
return false;
}
String[] fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldWriteField(String name, Properties properties, int depth) {
if (properties == null || depth != 1) {
return true;
}
String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
if (excludeFields == null) {
return true;
}
String[] fields = excludeFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Writes the "addresses" array of a tensor cell-remove update: one JSON
 * object per cell address, mapping dimension name to label. Map keys are
 * comma-separated "dim:label" segments; empty segments are skipped.
 *
 * Bug fix: previously writeStartObject() was called before the empty-segment
 * check, so an empty segment hit "continue" with the object still open,
 * producing unbalanced JSON. The object is now opened only after validation.
 *
 * @throws IllegalArgumentException if a segment is not "dim:label" with
 *                                  non-empty parts
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String key = entry.getKey().toString();
        for (String dimension : key.split(",")) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // skip before opening any object, keeping JSON balanced
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
private static String getStackTraceAsString(Throwable throwable) {
try (StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true)) {
throwable.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
@lesters | public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
if (printIdOnError) {
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println("Error occur when processing document with docID: " +docId);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | System.out.println("Error occur when processing document with docID: " +docId); | public String exec(Tuple tuple) throws IOException {
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
if (verbose) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
DOCUMENT,
PUT,
ID,
REMOVE,
UPDATE;
@Override
public String toString() {
return super.toString().toLowerCase();
}
public static Operation fromString(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return op;
}
}
throw new IllegalArgumentException("Unknown operation: " + text);
}
public static boolean valid(String text) {
for (Operation op : Operation.values()) {
if (op.toString().equalsIgnoreCase(text)) {
return true;
}
}
return false;
}
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_PRINT_DOCID_WHEN_ERROR = "print-docid-when-error";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static boolean printIdOnError;
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
// Constructs the UDF from "key=value" style parameters (parsed by
// VespaConfiguration.loadProperties — presumably simple key=value pairs; confirm).
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
// Pre-register counters so they are visible even when zero.
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
// NOTE(review): writes a static field from an instance constructor; last instance wins.
printIdOnError = Boolean.parseBoolean(properties.getProperty(PROPERTY_PRINT_DOCID_WHEN_ERROR, "false"));
}
@Override
// NOTE(review): @Override cannot apply to the static method below; it presumably
// belongs to the exec(Tuple) override defined elsewhere in this class — confirm.
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @return A valid JSON Vespa document operation, or null when op/docId/fields are missing
 * @throws IOException if writing the JSON stream fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
// Nothing to emit without an operation, a document id and at least one field.
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
// The operation name ("put"/"remove"/"update") is the field name, the id its value.
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
// Optional test-and-set condition rendered from the same field values as the id.
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
// A remove has no field payload.
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
// Resolves which partial-update verb (if any) is configured for the given field:
// returns the verb whose property lists the field name exactly, else null.
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> candidate : operationMap.entrySet()) {
        String configured = properties.getProperty(candidate.getKey());
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.equals(name)) {
                return candidate.getValue();
            }
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
// Writes one named field unless excluded by 'exclude-fields'. Map fields with a
// configured map partial operation are routed to writePartialUpdateAndRemoveMap;
// top-level fields of an update are wrapped in partial-update syntax.
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
// Emits per-key partial updates for a map field configured under
// remove-map-fields / update-map-fields, using Vespa's "field{key}" syntax.
// Bag elements that are not (key, value) 2-tuples are silently skipped.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
// Descend into the bag's tuple schema; element 1 is the value schema.
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
// NOTE(review): emits {"remove": 0} — presumably the value is ignored by
// Vespa for map-entry removal; confirm against the document JSON format.
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
// Serializes a single Pig value into JSON, dispatching on the Pig DataType.
// Maps may become tensors, tuples may become objects or arrays, and bags may
// become objects or arrays depending on the configured properties.
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
// Unknown types are dropped silently.
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
// Dates are serialized as epoch milliseconds.
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
// Raw bytes are base64-encoded as required by the Vespa JSON format.
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
// Configured tensor fields are rendered as cells/addresses instead of a plain object.
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
// Render the tuple as a JSON object keyed by its schema field names.
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
// Single-element tuples listed in 'simple-array-fields' skip the array wrapper.
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
// Bags of (key, value) tuples become a JSON object; malformed elements are skipped.
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
// Partial-update syntax wraps only top-level document fields (depth 1) of an update.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    if (depth != 1) {
        return false;
    }
    return op == Operation.UPDATE;
}
// Wraps a value in its partial-update verb, e.g. {"assign": <value>}.
// Falls back to "assign" when no specific verb is configured for the field.
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
// Decides whether a tuple needs an enclosing JSON array. Single-element tuples
// whose field is listed in 'simple-array-fields' (or when it is "*") are unwrapped.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if ("*".equals(configured)) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// True when the field should render a tuple as a JSON object: the field is named
// (or "*" is given) under 'update-map-fields' or 'simple-object-fields'.
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    for (String configured : new String[] {addBagAsMapFields, simpleObjectFields}) {
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")) {
            return true;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when a bag of (key, value) tuples should become a JSON object:
// the field is listed under 'bag-as-map-fields' or that property is "*".
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    String configured = (properties == null) ? null : properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True when this map field is configured as a tensor under any of the
// create/update/remove tensor properties (exact name match after trim,
// case-insensitive; no "*" wildcard here). The map argument is unused.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorFieldLists = {
        properties.getProperty(CREATE_TENSOR_FIELDS),
        properties.getProperty(UPDATE_TENSOR_FIELDS),
        properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String configured : tensorFieldLists) {
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when the field is listed under 'remove-tensor-fields', meaning the map
// should be serialized as tensor cell addresses to remove rather than cells to add.
private static boolean isRemoveTensor(String name, Properties properties) {
    String configured = (properties == null) ? null : properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Exclusion via 'exclude-fields' applies only to top-level document fields
// (depth == 1); everything else is always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String candidate : excluded.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Writes a mapped tensor as Vespa "cells" JSON. Map keys are comma-separated
// "dim:label" address pairs; values must parse as double.
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
// Empty segments (e.g. trailing commas) are skipped before the object is written.
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
// Writes the "addresses" array for a tensor-cell removal update. Map keys are
// comma-separated "dim:label" address pairs; one JSON object is emitted per pair.
// @throws IllegalArgumentException on a malformed "dim:label" segment
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // BUG FIX: skip empty segments BEFORE opening the JSON object.
            // Previously writeStartObject() preceded this check, so an empty
            // segment left an unclosed object and produced invalid JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders a throwable's full stack trace into a String for warn/log output.
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
            PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Records a Pig aggregated warning and mirrors the message to stderr so it
// also appears in the task's log output.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} | class VespaDocumentOperation extends EvalFunc<String> {
// Supported Vespa document operation kinds; the lower-cased name doubles as the
// JSON field name of the operation.
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    @Override
    public String toString() {
        return super.toString().toLowerCase();
    }

    // Parses a case-insensitive operation name; throws IllegalArgumentException when unknown.
    public static Operation fromString(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    // True when the given text names a known operation (case-insensitive).
    public static boolean valid(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property names controlling document id templating, operation choice and
// how Pig values are rendered into Vespa JSON.
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Partial-update verbs used in the Vespa document JSON format.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps map-field properties to the partial-update verb they trigger.
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps tensor/map-field properties to the partial-update verb they trigger.
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// When true, exec() prints each processed document id to stdout (off by default).
private final boolean verbose;
// Document id template, operation type and raw configuration for this UDF instance.
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
// Constructs the UDF from "key=value" style parameters (parsed by
// VespaConfiguration.loadProperties — presumably simple key=value pairs; confirm).
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
// Pre-register counters so they are visible even when zero.
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
// Per-document stdout logging is opt-in and off by default.
verbose = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
@Override
// NOTE(review): @Override cannot apply to the static method below; it presumably
// belongs to the exec(Tuple) override defined elsewhere in this class — confirm.
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op Operation (put, remove, update)
 * @param docId Document id
 * @param fields Fields to put in document operation
 * @return A valid JSON Vespa document operation, or null when op/docId/fields are missing
 * @throws IOException if writing the JSON stream fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
Schema schema) throws IOException {
// Nothing to emit without an operation, a document id and at least one field.
if (op == null) {
return null;
}
if (docId == null || docId.length() == 0) {
return null;
}
if (fields.isEmpty()) {
return null;
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
g.writeStartObject();
// The operation name ("put"/"remove"/"update") is the field name, the id its value.
g.writeStringField(op.toString(), docId);
boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
if (op == Operation.UPDATE && createIfNonExistent) {
writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
}
// Optional test-and-set condition rendered from the same field values as the id.
String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
if (testSetConditionTemplate != null) {
String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
}
// A remove has no field payload.
if (op != Operation.REMOVE) {
writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
}
g.writeEndObject();
g.close();
return out.toString();
}
// Resolves which partial-update verb (if any) is configured for the given field:
// returns the verb whose property lists the field name exactly, else null.
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> candidate : operationMap.entrySet()) {
        String configured = properties.getProperty(candidate.getKey());
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.equals(name)) {
                return candidate.getValue();
            }
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
// Writes one named field unless excluded by 'exclude-fields'. Map fields with a
// configured map partial operation are routed to writePartialUpdateAndRemoveMap;
// top-level fields of an update are wrapped in partial-update syntax.
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
// Emits per-key partial updates for a map field configured under
// remove-map-fields / update-map-fields, using Vespa's "field{key}" syntax.
// Bag elements that are not (key, value) 2-tuples are silently skipped.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
// Descend into the bag's tuple schema; element 1 is the value schema.
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
// NOTE(review): emits {"remove": 0} — presumably the value is ignored by
// Vespa for map-entry removal; confirm against the document JSON format.
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
// Serializes a single Pig value into JSON, dispatching on the Pig DataType.
// Maps may become tensors, tuples may become objects or arrays, and bags may
// become objects or arrays depending on the configured properties.
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
// Unknown types are dropped silently.
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
// Dates are serialized as epoch milliseconds.
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
// Raw bytes are base64-encoded as required by the Vespa JSON format.
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
// Configured tensor fields are rendered as cells/addresses instead of a plain object.
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
// Render the tuple as a JSON object keyed by its schema field names.
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
// Single-element tuples listed in 'simple-array-fields' skip the array wrapper.
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
// Bags of (key, value) tuples become a JSON object; malformed elements are skipped.
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
// Partial-update syntax wraps only top-level document fields (depth 1) of an update.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    if (depth != 1) {
        return false;
    }
    return op == Operation.UPDATE;
}
// Wraps a value in its partial-update verb, e.g. {"assign": <value>}.
// Falls back to "assign" when no specific verb is configured for the field.
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
// Decides whether a tuple needs an enclosing JSON array. Single-element tuples
// whose field is listed in 'simple-array-fields' (or when it is "*") are unwrapped.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if ("*".equals(configured)) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// True when the field should render a tuple as a JSON object: the field is named
// (or "*" is given) under 'update-map-fields' or 'simple-object-fields'.
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    if (simpleObjectFields == null && addBagAsMapFields == null) {
        return false;
    }
    for (String configured : new String[] {addBagAsMapFields, simpleObjectFields}) {
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")) {
            return true;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when a bag of (key, value) tuples should become a JSON object:
// the field is listed under 'bag-as-map-fields' or that property is "*".
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    String configured = (properties == null) ? null : properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True when this map field is configured as a tensor under any of the
// create/update/remove tensor properties (exact name match after trim,
// case-insensitive; no "*" wildcard here). The map argument is unused.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorFieldLists = {
        properties.getProperty(CREATE_TENSOR_FIELDS),
        properties.getProperty(UPDATE_TENSOR_FIELDS),
        properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String configured : tensorFieldLists) {
        if (configured == null) {
            continue;
        }
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True when the field is listed under 'remove-tensor-fields', meaning the map
// should be serialized as tensor cell addresses to remove rather than cells to add.
private static boolean isRemoveTensor(String name, Properties properties) {
    String configured = (properties == null) ? null : properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Exclusion via 'exclude-fields' applies only to top-level document fields
// (depth == 1); everything else is always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String candidate : excluded.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Writes a mapped tensor as Vespa "cells" JSON. Map keys are comma-separated
// "dim:label" address pairs; values must parse as double.
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
// Empty segments (e.g. trailing commas) are skipped before the object is written.
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
// Writes the "addresses" array for a tensor-cell removal update. Map keys are
// comma-separated "dim:label" address pairs; one JSON object is emitted per pair.
// @throws IllegalArgumentException on a malformed "dim:label" segment
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // BUG FIX: skip empty segments BEFORE opening the JSON object.
            // Previously writeStartObject() preceded this check, so an empty
            // segment left an unclosed object and produced invalid JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders a throwable's full stack trace into a String for warn/log output.
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
            PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Records a Pig aggregated warning and mirrors the message to stderr so it
// also appears in the task's log output.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
} |
We definitely shouldn't print the document id to stdout for each document by default, as grid feeds usually contain a very large number of documents. If you want such a feature, add a `verbose` or `debug` property and output the id only when that property is set. Make sure the property is off by default. | public String exec(Tuple tuple) throws IOException {
// Reject empty input rows and count them as failures.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
// A document id template is mandatory for producing any operation.
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
// Render the document id from the template and build the JSON operation.
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
// NOTE(review): prints the docid to stdout; prefer gating behind a verbose/debug
// property since grid feeds process very large document counts. Also note the
// recomputation below may itself throw if the failure was in tupleMap/toString.
if (printIdOnError) {
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
System.out.println("Error occur when processing document with docID: " +docId);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | System.out.println("Error occur when processing document with docID: " +docId); | public String exec(Tuple tuple) throws IOException {
// Reject empty input rows and count them as failures.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
// A document id template is mandatory for producing any operation.
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
if (reporter != null) {
reporter.progress();
}
// Render the document id from the template and build the JSON operation.
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
String docId = TupleTools.toString(fields, template);
// Per-document stdout logging, opt-in via the 'verbose' property (off by default).
if (verbose) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lower-case operation name, matching the JSON field used in document operations. */
    @Override
    public String toString() {
        return name().toLowerCase();
    }

    /** Case-insensitive lookup; returns null when no operation matches. */
    private static Operation lookup(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        return null;
    }

    /** Parses an operation name, throwing on unknown input. */
    public static Operation fromString(String text) {
        Operation result = lookup(text);
        if (result == null) {
            throw new IllegalArgumentException("Unknown operation: " + text);
        }
        return result;
    }

    /** True when the given text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        return lookup(text) != null;
    }
}
// Property names accepted by the UDF constructor (passed as "key=value" params).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_PRINT_DOCID_WHEN_ERROR = "print-docid-when-error";
// Comma-separated field lists controlling how individual fields are serialized.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Partial-update verbs emitted in Vespa update operations.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Whether to print the document id when an error occurs (PROPERTY_PRINT_DOCID_WHEN_ERROR).
private static boolean printIdOnError;
// Maps map-field properties to the partial-update verb applied to map entries.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps tensor/map field properties to the partial-update verb for whole fields.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Document id template from the "docid" property, e.g. "id:ns:type::<field>".
private final String template;
// Operation to emit; defaults to "put".
private final Operation operation;
// All configuration passed to the constructor.
private final Properties properties;
private PigStatusReporter statusReporter;

// Builds the UDF from "key=value" parameter strings and pre-registers the
// ok/failed counters so they are visible even when zero.
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
    printIdOnError = Boolean.parseBoolean(properties.getProperty(PROPERTY_PRINT_DOCID_WHEN_ERROR, "false"));
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * NOTE(review): a stray {@code @Override} annotation preceded this method in
 * the original; create() is static and overrides nothing, so the annotation
 * is a compile error here and has been removed (it presumably belonged to
 * the overriding exec() method).
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties UDF configuration controlling serialization
 * @param schema     Pig schema for the input fields (may be null)
 * @return A valid JSON Vespa document operation, or null when none can be built
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // "create": true turns the update into a create-if-missing update.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // Test-and-set condition is itself a template resolved against the fields.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode with the same charset instead of
    // the platform default (plain out.toString() mis-decodes on non-UTF-8 JVMs).
    return out.toString("UTF-8");
}
// Returns the partial-update verb mapped to the first property whose
// comma-separated field list contains 'name' (exact match, no trimming),
// or null when no configured list mentions the field.
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String fieldList = properties.getProperty(entry.getKey());
        if (fieldList == null) {
            continue;
        }
        for (String field : fieldList.split(",")) {
            if (field.equals(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
// Writes one named field unless excluded via EXCLUDE_FIELDS. Fields configured
// for per-entry map update/remove are routed to writePartialUpdateAndRemoveMap;
// top-level fields of an update operation are wrapped in a partial-update
// object; everything else is written directly.
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    if (shouldWriteField(name, properties, depth)) {
        String operation = getPartialOperation(mapPartialOperationMap, name, properties);
        if (operation != null) {
            writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
        } else {
            g.writeFieldName(name);
            if (shouldWritePartialUpdate(op, depth)) {
                writePartialUpdate(value, type, g, name, properties, schema, op, depth);
            } else {
                writeValue(value, type, g, name, properties, schema, op, depth);
            }
        }
    }
}
// Handles map fields configured for per-entry update/remove: for each
// (key, value) tuple in the bag, addresses the single map entry as
// "field{key}" and writes either {"remove": 0} (removal) or a normal
// partial update of the value.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    schema = (schema != null) ? schema.getField(0).schema : null;
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;  // only (key, value) pairs are meaningful here
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
// Serializes one Pig value to JSON according to its Pig DataType. Container
// types (map, tuple, bag) recurse through writeField/writeValue; 'depth'
// tracks nesting so field exclusion and partial-update wrapping apply only
// to top-level document fields.
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            // Unknown type: emit nothing.
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Datetimes are written as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded into a JSON string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Fields listed in tensor properties get Vespa tensor JSON
                // instead of a plain object.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                // Configured tuples become JSON objects keyed by schema field names.
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // Otherwise a tuple is an array; single-element tuples listed in
                // SIMPLE_ARRAY_FIELDS are written without the surrounding array.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bags of (key, value) tuples configured as maps become JSON objects.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;  // not a (key, value) pair; skip
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// Only top-level fields (depth 1) of an update operation are wrapped in a
// partial-update object ({"assign": ...} etc.).
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return depth == 1 && op == Operation.UPDATE;
}
// Wraps a value in a partial-update object, e.g. {"assign": <value>}. The verb
// comes from the tensor/map field properties when the field is listed there,
// and defaults to "assign".
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    if (operation != null) {
        g.writeFieldName(operation);
    } else {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
// Decides whether a tuple gets a surrounding JSON array. Only single-element
// tuples whose field is listed in SIMPLE_ARRAY_FIELDS ("*" = all fields,
// matching is trimmed and case-insensitive) are written bare.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    String[] fields = simpleArrayFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// A tuple is written as a JSON object (map) when its field name appears in
// either UPDATE_MAP_FIELDS or SIMPLE_OBJECT_FIELDS ("*" matches all fields).
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    return matchesFieldList(properties.getProperty(UPDATE_MAP_FIELDS), name)
            || matchesFieldList(properties.getProperty(SIMPLE_OBJECT_FIELDS), name);
}

// True when 'fieldList' is "*" or contains 'name' (trimmed, case-insensitive).
private static boolean matchesFieldList(String fieldList, String name) {
    if (fieldList == null) {
        return false;
    }
    if (fieldList.equals("*")) {
        return true;
    }
    for (String field : fieldList.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Bags listed in BAG_AS_MAP_FIELDS ("*" = all) are serialized as JSON objects
// keyed by the first tuple element instead of as arrays.
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String field : configured.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True when 'name' is listed in any tensor-related field property, meaning the
// map value must be serialized as a Vespa tensor. The 'map' parameter is
// currently unused; it is kept for interface compatibility with callers.
// Note: unlike the other field-list helpers, no "*" wildcard is honored here
// (preserved from the original behavior).
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // The original duplicated the same membership loop three times; factored
    // into csvListContains with identical matching semantics.
    return csvListContains(properties.getProperty(CREATE_TENSOR_FIELDS), name)
            || csvListContains(properties.getProperty(UPDATE_TENSOR_FIELDS), name)
            || csvListContains(properties.getProperty(REMOVE_TENSOR_FIELDS), name);
}

// True when 'csv' is a non-null comma-separated list containing 'name'
// (trimmed, case-insensitive).
private static boolean csvListContains(String csv, String name) {
    if (csv == null) {
        return false;
    }
    for (String field : csv.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// A field is a "remove tensor" when listed in REMOVE_TENSOR_FIELDS
// (trimmed, case-insensitive; no "*" wildcard).
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    boolean listed = false;
    for (String field : removeTensorFields.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            listed = true;
            break;
        }
    }
    return listed;
}
// Only top-level document fields (depth == 1) can be excluded via the
// EXCLUDE_FIELDS property; everything else is always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    for (String field : excludeFields.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Writes a map as Vespa tensor cell JSON:
// "cells": [ {"address": {<dim>: <label>, ...}, "value": <number>}, ... ].
// Map keys are comma-separated "dim:label" pairs; values must parse as doubles.
// Throws IllegalArgumentException on malformed "dim:label" tokens.
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // tolerate empty tokens from trailing/double commas
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
// Writes tensor cell addresses to remove:
// "addresses": [ {<dim>: <label>}, ... ]. Map keys are comma-separated
// "dim:label" pairs; map values are ignored for removal.
// NOTE(review): this emits one address object per dimension; if a cell key
// has multiple dimensions they end up in separate objects — confirm against
// the intended Vespa remove format.
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                // BUG FIX: skip empty tokens BEFORE opening the JSON object.
                // The original called writeStartObject() first and then
                // 'continue'd, leaving an unbalanced, never-closed object.
                continue;
            }
            g.writeStartObject();
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders the complete stack trace of 'throwable' as a String.
// StringWriter/PrintWriter perform no real I/O, so no IOException can occur.
private static String getStackTraceAsString(Throwable throwable) {
    StringWriter buffer = new StringWriter();
    try (PrintWriter writer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(writer);
    }
    return buffer.toString();
}
// Emits the message through Pig's aggregated-warning channel (EvalFunc.warn)
// and mirrors it to stderr so it is visible in raw task logs as well.
private void warnLog(String msg, PigWarning warning) {
    warn(msg, warning);
    System.err.println(msg);
}
} | class VespaDocumentOperation extends EvalFunc<String> {
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Lower-case operation name, matching the JSON field used in document operations. */
    @Override
    public String toString() {
        return name().toLowerCase();
    }

    /** Case-insensitive lookup; returns null when no operation matches. */
    private static Operation lookup(String text) {
        for (Operation candidate : values()) {
            if (candidate.toString().equalsIgnoreCase(text)) {
                return candidate;
            }
        }
        return null;
    }

    /** Parses an operation name, throwing on unknown input. */
    public static Operation fromString(String text) {
        Operation result = lookup(text);
        if (result == null) {
            throw new IllegalArgumentException("Unknown operation: " + text);
        }
        return result;
    }

    /** True when the given text names a known operation (case-insensitive). */
    public static boolean valid(String text) {
        return lookup(text) != null;
    }
}
// Property names accepted by the UDF constructor (passed as "key=value" params).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
// Comma-separated field lists controlling how individual fields are serialized.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Partial-update verbs emitted in Vespa update operations.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Maps map-field properties to the partial-update verb applied to map entries.
private static Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Maps tensor/map field properties to the partial-update verb for whole fields.
private static Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Whether to print each processed document id ("verbose" property).
private final boolean verbose;
// Document id template from the "docid" property, e.g. "id:ns:type::<field>".
private final String template;
// Operation to emit; defaults to "put".
private final Operation operation;
// All configuration passed to the constructor.
private final Properties properties;
private PigStatusReporter statusReporter;

// Builds the UDF from "key=value" parameter strings and pre-registers the
// ok/failed counters so they are visible even when zero.
public VespaDocumentOperation(String... params) {
    statusReporter = PigStatusReporter.getInstance();
    if (statusReporter != null) {
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
        statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
    }
    properties = VespaConfiguration.loadProperties(params);
    template = properties.getProperty(PROPERTY_ID_TEMPLATE);
    operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
    verbose = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * NOTE(review): a stray {@code @Override} annotation preceded this method in
 * the original; create() is static and overrides nothing, so the annotation
 * is a compile error here and has been removed (it presumably belonged to
 * the overriding exec() method).
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties UDF configuration controlling serialization
 * @param schema     Pig schema for the input fields (may be null)
 * @return A valid JSON Vespa document operation, or null when none can be built
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
                            Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        // "create": true turns the update into a create-if-missing update.
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        // Test-and-set condition is itself a template resolved against the fields.
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator wrote UTF-8 bytes; decode with the same charset instead of
    // the platform default (plain out.toString() mis-decodes on non-UTF-8 JVMs).
    return out.toString("UTF-8");
}
// Returns the partial-update verb mapped to the first property whose
// comma-separated field list contains 'name' (exact match, no trimming),
// or null when no configured list mentions the field.
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String fieldList = properties.getProperty(entry.getKey());
        if (fieldList == null) {
            continue;
        }
        for (String field : fieldList.split(",")) {
            if (field.equals(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
// Writes one named field unless excluded via EXCLUDE_FIELDS. Fields configured
// for per-entry map update/remove are routed to writePartialUpdateAndRemoveMap;
// top-level fields of an update operation are wrapped in a partial-update
// object; everything else is written directly.
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    if (shouldWriteField(name, properties, depth)) {
        String operation = getPartialOperation(mapPartialOperationMap, name, properties);
        if (operation != null) {
            writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
        } else {
            g.writeFieldName(name);
            if (shouldWritePartialUpdate(op, depth)) {
                writePartialUpdate(value, type, g, name, properties, schema, op, depth);
            } else {
                writeValue(value, type, g, name, properties, schema, op, depth);
            }
        }
    }
}
// Handles map fields configured for per-entry update/remove: for each
// (key, value) tuple in the bag, addresses the single map entry as
// "field{key}" and writes either {"remove": 0} (removal) or a normal
// partial update of the value.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
    schema = (schema != null) ? schema.getField(0).schema : null;
    Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
    DataBag bag = (DataBag) value;
    for (Tuple element : bag) {
        if (element.size() != 2) {
            continue;  // only (key, value) pairs are meaningful here
        }
        String k = (String) element.get(0);
        Object v = element.get(1);
        Byte t = DataType.findType(v);
        if (t == DataType.TUPLE) {
            g.writeFieldName(name + "{" + k + "}");
            if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
                g.writeStartObject();
                g.writeFieldName(PARTIAL_UPDATE_REMOVE);
                g.writeNumber(0);
                g.writeEndObject();
            } else {
                writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
            }
        }
    }
}
// Serializes one Pig value to JSON according to its Pig DataType. Container
// types (map, tuple, bag) recurse through writeField/writeValue; 'depth'
// tracks nesting so field exclusion and partial-update wrapping apply only
// to top-level document fields.
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    switch (type) {
        case DataType.UNKNOWN:
            // Unknown type: emit nothing.
            break;
        case DataType.NULL:
            g.writeNull();
            break;
        case DataType.BOOLEAN:
            g.writeBoolean((boolean) value);
            break;
        case DataType.INTEGER:
            g.writeNumber((int) value);
            break;
        case DataType.LONG:
            g.writeNumber((long) value);
            break;
        case DataType.FLOAT:
            g.writeNumber((float) value);
            break;
        case DataType.DOUBLE:
            g.writeNumber((double) value);
            break;
        case DataType.DATETIME:
            // Datetimes are written as epoch milliseconds.
            g.writeNumber(((DateTime) value).getMillis());
            break;
        case DataType.BYTEARRAY:
            // Raw bytes are base64-encoded into a JSON string.
            DataByteArray bytes = (DataByteArray) value;
            String raw = Base64.getEncoder().encodeToString(bytes.get());
            g.writeString(raw);
            break;
        case DataType.CHARARRAY:
            g.writeString((String) value);
            break;
        case DataType.BIGINTEGER:
            g.writeNumber((BigInteger) value);
            break;
        case DataType.BIGDECIMAL:
            g.writeNumber((BigDecimal) value);
            break;
        case DataType.MAP:
            g.writeStartObject();
            Map<Object, Object> map = (Map<Object, Object>) value;
            if (shouldCreateTensor(map, name, properties)) {
                // Fields listed in tensor properties get Vespa tensor JSON
                // instead of a plain object.
                if (isRemoveTensor(name, properties)) {
                    writeRemoveTensor(map, g);
                } else {
                    writeTensor(map, g);
                }
            } else {
                for (Map.Entry<Object, Object> entry : map.entrySet()) {
                    String k = entry.getKey().toString();
                    Object v = entry.getValue();
                    Byte t = DataType.findType(v);
                    Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
                    writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
                }
            }
            g.writeEndObject();
            break;
        case DataType.TUPLE:
            Tuple tuple = (Tuple) value;
            if (shouldWriteTupleAsMap(name, properties)) {
                // Configured tuples become JSON objects keyed by schema field names.
                Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
                writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
            } else {
                // Otherwise a tuple is an array; single-element tuples listed in
                // SIMPLE_ARRAY_FIELDS are written without the surrounding array.
                boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
                if (writeStartArray) {
                    g.writeStartArray();
                }
                for (Object v : tuple) {
                    writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
                }
                if (writeStartArray) {
                    g.writeEndArray();
                }
            }
            break;
        case DataType.BAG:
            DataBag bag = (DataBag) value;
            schema = (schema != null) ? schema.getField(0).schema : null;
            if (shouldWriteBagAsMap(name, properties)) {
                // Bags of (key, value) tuples configured as maps become JSON objects.
                Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
                g.writeStartObject();
                for (Tuple element : bag) {
                    if (element.size() != 2) {
                        continue;  // not a (key, value) pair; skip
                    }
                    String k = (String) element.get(0);
                    Object v = element.get(1);
                    Byte t = DataType.findType(v);
                    if (t == DataType.TUPLE) {
                        Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
                        writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
                    } else {
                        writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
                    }
                }
                g.writeEndObject();
            } else {
                g.writeStartArray();
                for (Tuple t : bag) {
                    writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
                }
                g.writeEndArray();
            }
            break;
    }
}
// Only top-level fields (depth 1) of an update operation are wrapped in a
// partial-update object ({"assign": ...} etc.).
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return depth == 1 && op == Operation.UPDATE;
}
// Wraps a value in a partial-update object, e.g. {"assign": <value>}. The verb
// comes from the tensor/map field properties when the field is listed there,
// and defaults to "assign".
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String operation = getPartialOperation(partialOperationMap, name, properties);
    if (operation != null) {
        g.writeFieldName(operation);
    } else {
        g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
    }
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
// Decides whether a tuple gets a surrounding JSON array. Only single-element
// tuples whose field is listed in SIMPLE_ARRAY_FIELDS ("*" = all fields,
// matching is trimmed and case-insensitive) are written bare.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (simpleArrayFields == null) {
        return true;
    }
    if (simpleArrayFields.equals("*")) {
        return false;
    }
    String[] fields = simpleArrayFields.split(",");
    for (String field : fields) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// A tuple is written as a JSON object (map) when its field name appears in
// either UPDATE_MAP_FIELDS or SIMPLE_OBJECT_FIELDS ("*" matches all fields).
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    return matchesFieldList(properties.getProperty(UPDATE_MAP_FIELDS), name)
            || matchesFieldList(properties.getProperty(SIMPLE_OBJECT_FIELDS), name);
}

// True when 'fieldList' is "*" or contains 'name' (trimmed, case-insensitive).
private static boolean matchesFieldList(String fieldList, String name) {
    if (fieldList == null) {
        return false;
    }
    if (fieldList.equals("*")) {
        return true;
    }
    for (String field : fieldList.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// Bags listed in BAG_AS_MAP_FIELDS ("*" = all) are serialized as JSON objects
// keyed by the first tuple element instead of as arrays.
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String field : configured.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True when 'name' is listed in any tensor-related field property, meaning the
// map value must be serialized as a Vespa tensor. The 'map' parameter is
// currently unused; it is kept for interface compatibility with callers.
// Note: unlike the other field-list helpers, no "*" wildcard is honored here
// (preserved from the original behavior).
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    // The original duplicated the same membership loop three times; factored
    // into csvListContains with identical matching semantics.
    return csvListContains(properties.getProperty(CREATE_TENSOR_FIELDS), name)
            || csvListContains(properties.getProperty(UPDATE_TENSOR_FIELDS), name)
            || csvListContains(properties.getProperty(REMOVE_TENSOR_FIELDS), name);
}

// True when 'csv' is a non-null comma-separated list containing 'name'
// (trimmed, case-insensitive).
private static boolean csvListContains(String csv, String name) {
    if (csv == null) {
        return false;
    }
    for (String field : csv.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// A field is a "remove tensor" when listed in REMOVE_TENSOR_FIELDS
// (trimmed, case-insensitive; no "*" wildcard).
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (removeTensorFields == null) {
        return false;
    }
    boolean listed = false;
    for (String field : removeTensorFields.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            listed = true;
            break;
        }
    }
    return listed;
}
// Only top-level document fields (depth == 1) can be excluded via the
// EXCLUDE_FIELDS property; everything else is always written.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excludeFields = properties.getProperty(EXCLUDE_FIELDS);
    if (excludeFields == null) {
        return true;
    }
    for (String field : excludeFields.split(",")) {
        if (field.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Writes a map as Vespa tensor cell JSON:
// "cells": [ {"address": {<dim>: <label>, ...}, "value": <number>}, ... ].
// Map keys are comma-separated "dim:label" pairs; values must parse as doubles.
// Throws IllegalArgumentException on malformed "dim:label" tokens.
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        Double v = Double.parseDouble(entry.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // tolerate empty tokens from trailing/double commas
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(v);
        g.writeEndObject();
    }
    g.writeEndArray();
}
// Writes tensor cell addresses to remove:
// "addresses": [ {<dim>: <label>}, ... ]. Map keys are comma-separated
// "dim:label" pairs; map values are ignored for removal.
// NOTE(review): this emits one address object per dimension; if a cell key
// has multiple dimensions they end up in separate objects — confirm against
// the intended Vespa remove format.
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                // BUG FIX: skip empty tokens BEFORE opening the JSON object.
                // The original called writeStartObject() first and then
                // 'continue'd, leaving an unbalanced, never-closed object.
                continue;
            }
            g.writeStartObject();
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
// Renders the complete stack trace of 'throwable' as a String.
// StringWriter/PrintWriter perform no real I/O, so no IOException can occur.
private static String getStackTraceAsString(Throwable throwable) {
    StringWriter buffer = new StringWriter();
    try (PrintWriter writer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(writer);
    }
    return buffer.toString();
}
// Reports a warning through Pig's aggregated-warning channel (EvalFunc.warn)
// and also echoes it to stderr so it is visible in the raw task logs.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
}
public String exec(Tuple tuple) throws IOException {
// Guard: a null/empty input tuple produces no operation and counts as a failure.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
// A document id template (the "docid" property) is mandatory.
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
// Signal liveness to the framework so long rows do not trip the task timeout.
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
// Expand the docid template with values from the current tuple.
String docId = TupleTools.toString(fields, template);
if (printId) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
// Never fail the whole Pig task on a single bad row: count, log, and skip it.
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
public String exec(Tuple tuple) throws IOException {
// Guard: a null/empty input tuple produces no operation and counts as a failure.
if (tuple == null || tuple.size() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
return null;
}
// A document id template (the "docid" property) is mandatory.
if (template == null || template.length() == 0) {
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
warnLog("No valid document id template found. Skipping.", PigWarning.UDF_WARNING_1);
return null;
}
if (operation == null) {
warnLog("No valid operation found. Skipping.", PigWarning.UDF_WARNING_2);
return null;
}
String json = null;
try {
// Signal liveness to the framework so long rows do not trip the task timeout.
if (reporter != null) {
reporter.progress();
}
Schema inputSchema = getInputSchema();
Map<String, Object> fields = TupleTools.tupleMap(inputSchema, tuple);
// Expand the docid template with values from the current tuple.
String docId = TupleTools.toString(fields, template);
if (verbose) {
System.out.println("Processing docId: "+ docId);
}
json = create(operation, docId, fields, properties, inputSchema);
if (json == null || json.length() == 0) {
warnLog("No valid document operation could be created.", PigWarning.UDF_WARNING_3);
return null;
}
} catch (Exception e) {
// Never fail the whole Pig task on a single bad row: count, log, and skip it.
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 1);
}
StringBuilder sb = new StringBuilder();
sb.append("Caught exception processing input row: \n");
sb.append(tuple.toString());
sb.append("\nException: ");
sb.append(getStackTraceAsString(e));
warnLog(sb.toString(), PigWarning.UDF_WARNING_4);
return null;
}
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 1);
}
return json;
}
class VespaDocumentOperation extends EvalFunc<String> {
/** The Vespa document operations this UDF can emit. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Returns the JSON field name for this operation ("put", "update", ...). */
    @Override
    public String toString() {
        // Locale.ROOT: default-locale lowercasing is broken under e.g. the
        // Turkish locale ("ID" -> "\u0131d"), which would corrupt the emitted
        // JSON field names. ROOT guarantees locale-independent results.
        return super.toString().toLowerCase(java.util.Locale.ROOT);
    }

    /** Parses an operation name case-insensitively; throws on unknown names. */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true if the name matches a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
// Property keys accepted via the UDF constructor ("name=value" strings).
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
// Field-list properties: comma-separated field names controlling serialization.
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
// Partial-update operation names used in Vespa's JSON update syntax.
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
// Per-instance verbosity flag set from the constructor. Previously declared
// 'static', which let one UDF instance's configuration leak into every other
// instance in the same JVM; instance scope matches the other config fields.
private boolean printId;
// Field-list property name -> partial-update operation for map-typed fields.
// 'final': both maps are fully populated at class-init time and never rebound.
private static final Map<String, String> mapPartialOperationMap;
static {
    mapPartialOperationMap = new HashMap<>();
    mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static final Map<String, String> partialOperationMap;
static {
    partialOperationMap = new HashMap<>();
    partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
    partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
    partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
// Instance configuration resolved in the constructor.
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
// Configures the UDF from "property=value" argument strings supplied in the
// Pig DEFINE statement. Counters are registered at zero up front so they
// appear in the job statistics even when no documents are processed.
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
// Defaults: operation "put"; verbose docid printing off.
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
printId = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
// NOTE(review): a stray @Override annotation preceded this static method
// (static methods cannot override); it has been removed so the class compiles.
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties UDF configuration controlling serialization details
 * @param schema     Pig schema for the input tuple (may be null)
 * @return A valid JSON Vespa document operation, or null for unusable input
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator emitted UTF-8 bytes; decode them explicitly as UTF-8 rather
    // than with the platform default charset (the bare out.toString() did).
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update operation ("assign"/"add"/"remove") configured
 * for the given field name, or null if no configured property lists it.
 *
 * @param operationMap property label -> partial-update operation
 * @param name         field name to look up
 * @param properties   UDF configuration
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured == null) {
            continue;
        }
        // Match the way every other field-list property is matched: trimmed and
        // case-insensitive. The original used a raw, case-sensitive
        // List.contains, so "foo, bar" or "FOO" silently failed to match.
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
// Writes one named field unless excluded by exclude-fields. Map fields listed
// in remove-/update-map-fields get the fieldpath serializer (which emits its
// own "name{key}" field names); top-level fields of an UPDATE are wrapped in a
// partial-update object ({"assign": ...} etc.) first.
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
// Serializes map-typed partial updates: emits one "name{key}" entry per
// (key, value) tuple in the bag. For 'remove' the value written is a dummy 0
// (only the fieldpath address matters); otherwise the value gets a normal
// partial-update wrapper.
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
// Descend to the bag's tuple schema, then to the value element's schema.
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
// Only (key, value) pairs are meaningful; silently skip malformed tuples.
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
// Vespa fieldpath syntax for a map entry update: "name{key}".
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
// Serializes a single value to JSON according to its Pig DataType. Container
// types (MAP/TUPLE/BAG) recurse via writeField/writeValue; the properties
// control tensor, map-as-object and simple-array special cases.
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
// Binary payloads are emitted base64-encoded as a JSON string.
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
// A map becomes either tensor cells/addresses (when configured) or a
// plain JSON object with each entry written as a nested field.
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
// Configured tuples serialize as objects keyed by their schema field names.
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
// Single-element tuples listed in simple-array-fields drop the [] markers.
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
// A bag of (key, value) tuples listed in bag-as-map-fields is
// serialized as one JSON object instead of an array of pairs.
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
// A partial-update wrapper ({"assign": ...}) is only emitted for top-level
// document fields (depth 1) of an UPDATE operation.
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
    return depth == 1 && op == Operation.UPDATE;
}
// Wraps a value in its partial-update object, e.g. {"assign": <value>}.
// The operation comes from configuration; "assign" is the default.
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
    g.writeStartObject();
    String configured = getPartialOperation(partialOperationMap, name, properties);
    g.writeFieldName(configured != null ? configured : PARTIAL_UPDATE_ASSIGN);
    writeValue(value, type, g, name, properties, schema, op, depth);
    g.writeEndObject();
}
// Decides whether a tuple gets JSON array markers. Single-element tuples whose
// field is listed in simple-array-fields (or "*") are written bare.
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
    if (tuple.size() > 1 || properties == null) {
        return true;
    }
    String configured = properties.getProperty(SIMPLE_ARRAY_FIELDS);
    if (configured == null) {
        return true;
    }
    if (configured.equals("*")) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// True if the named tuple field should be serialized as a JSON object:
// listed (or "*") in update-map-fields or simple-object-fields.
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String updateMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
    String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
    for (String configured : new String[] { updateMapFields, simpleObjectFields }) {
        if (configured == null) {
            continue;
        }
        if (configured.equals("*")) {
            return true;
        }
        for (String candidate : configured.split(",")) {
            if (candidate.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True if the named bag field is listed (or "*") in bag-as-map-fields and so
// should be serialized as one JSON object of key/value entries.
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(BAG_AS_MAP_FIELDS);
    if (configured == null) {
        return false;
    }
    if (configured.equals("*")) {
        return true;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True if the named map field is listed in any of the tensor field-list
// properties (create/update/remove) and should be serialized as tensor JSON.
// Note: unlike the other field lists, "*" is not treated specially here.
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String[] tensorFieldLists = {
        properties.getProperty(CREATE_TENSOR_FIELDS),
        properties.getProperty(UPDATE_TENSOR_FIELDS),
        properties.getProperty(REMOVE_TENSOR_FIELDS)
    };
    for (String configured : tensorFieldLists) {
        if (configured == null) {
            continue;
        }
        for (String candidate : configured.split(",")) {
            if (candidate.trim().equalsIgnoreCase(name)) {
                return true;
            }
        }
    }
    return false;
}
// True if the named field is configured in remove-tensor-fields, i.e. its map
// encodes cell addresses to remove rather than cells to write.
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
// True unless the field is a top-level document field (depth 1) listed in the
// exclude-fields property; nested fields are never excluded.
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String candidate : excluded.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
// Serializes a map of "dim:label,dim:label" -> numeric value into Vespa's
// tensor "cells" JSON: [{"address": {dim: label, ...}, "value": n}, ...].
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
g.writeFieldName("cells");
g.writeStartArray();
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
// Cell values must be numeric; throws NumberFormatException otherwise.
Double v = Double.parseDouble(entry.getValue().toString());
g.writeStartObject();
g.writeFieldName("address");
g.writeStartObject();
// The key encodes the cell address as comma-separated "dimension:label" pairs.
String[] dimensions = k.split(",");
for (String dimension : dimensions) {
if (dimension == null || dimension.isEmpty()) {
continue;
}
String[] address = dimension.split(":");
// NOTE(review): a label that itself contains ':' is rejected by this check;
// use split(":", 2) if such labels must be supported.
if (address.length != 2) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
String dim = address[0];
String label = address[1];
if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
throw new IllegalArgumentException("Malformed cell address: " + dimension);
}
g.writeFieldName(dim.trim());
g.writeString(label.trim());
}
g.writeEndObject();
g.writeFieldName("value");
g.writeNumber(v);
g.writeEndObject();
}
g.writeEndArray();
}
/**
 * Writes a tensor "remove" update: an "addresses" array where each entry maps
 * a dimension name to the label of the cell to remove.
 *
 * @param map keys encode cell addresses as comma-separated "dimension:label" pairs
 * @param g   generator positioned inside the enclosing update object
 * @throws IOException if JSON generation fails
 * @throws IllegalArgumentException on a malformed cell address
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            // Validate BEFORE opening the object: the original called
            // writeStartObject() first and then 'continue'd on an empty
            // dimension, leaving an unclosed object and malformed JSON.
            if (dimension == null || dimension.isEmpty()) {
                continue;
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/** Renders a throwable's complete stack trace as a single string. */
private static String getStackTraceAsString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
         PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        // StringWriter.close() declares IOException but does not throw in practice.
        throw new UncheckedIOException(e);
    }
}
// Reports a warning through Pig's aggregated-warning channel (EvalFunc.warn)
// and also echoes it to stderr so it is visible in the raw task logs.
private void warnLog(String msg, PigWarning warning) {
warn(msg, warning);
System.err.println(msg);
}
}
class VespaDocumentOperation extends EvalFunc<String> {
/** The Vespa document operations this UDF can emit. */
public enum Operation {
    DOCUMENT,
    PUT,
    ID,
    REMOVE,
    UPDATE;

    /** Returns the JSON field name for this operation ("put", "update", ...). */
    @Override
    public String toString() {
        // Locale.ROOT: default-locale lowercasing is broken under e.g. the
        // Turkish locale ("ID" -> "\u0131d"), which would corrupt the emitted
        // JSON field names. ROOT guarantees locale-independent results.
        return super.toString().toLowerCase(java.util.Locale.ROOT);
    }

    /** Parses an operation name case-insensitively; throws on unknown names. */
    public static Operation fromString(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return op;
            }
        }
        throw new IllegalArgumentException("Unknown operation: " + text);
    }

    /** Returns true if the name matches a known operation (case-insensitive). */
    public static boolean valid(String text) {
        for (Operation op : Operation.values()) {
            if (op.toString().equalsIgnoreCase(text)) {
                return true;
            }
        }
        return false;
    }
}
private static final String PROPERTY_CREATE_IF_NON_EXISTENT = "create-if-non-existent";
private static final String PROPERTY_ID_TEMPLATE = "docid";
private static final String PROPERTY_OPERATION = "operation";
private static final String PROPERTY_VERBOSE = "verbose";
private static final String BAG_AS_MAP_FIELDS = "bag-as-map-fields";
private static final String SIMPLE_ARRAY_FIELDS = "simple-array-fields";
private static final String SIMPLE_OBJECT_FIELDS = "simple-object-fields";
private static final String CREATE_TENSOR_FIELDS = "create-tensor-fields";
private static final String REMOVE_TENSOR_FIELDS = "remove-tensor-fields";
private static final String UPDATE_TENSOR_FIELDS = "update-tensor-fields";
private static final String REMOVE_MAP_FIELDS = "remove-map-fields";
private static final String UPDATE_MAP_FIELDS = "update-map-fields";
private static final String EXCLUDE_FIELDS = "exclude-fields";
private static final String TESTSET_CONDITION = "condition";
private static final String PARTIAL_UPDATE_ASSIGN = "assign";
private static final String PARTIAL_UPDATE_ADD = "add";
private static final String PARTIAL_UPDATE_REMOVE = "remove";
private static Map<String, String> mapPartialOperationMap;
static {
mapPartialOperationMap = new HashMap<>();
mapPartialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
mapPartialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private static Map<String, String> partialOperationMap;
static {
partialOperationMap = new HashMap<>();
partialOperationMap.put(REMOVE_TENSOR_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_TENSOR_FIELDS, PARTIAL_UPDATE_ADD);
partialOperationMap.put(REMOVE_MAP_FIELDS, PARTIAL_UPDATE_REMOVE);
partialOperationMap.put(UPDATE_MAP_FIELDS, PARTIAL_UPDATE_ASSIGN);
}
private final boolean verbose;
private final String template;
private final Operation operation;
private final Properties properties;
private PigStatusReporter statusReporter;
public VespaDocumentOperation(String... params) {
statusReporter = PigStatusReporter.getInstance();
if (statusReporter != null) {
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation ok", 0);
statusReporter.incrCounter("Vespa Document Operation Counters", "Document operation failed", 0);
}
properties = VespaConfiguration.loadProperties(params);
template = properties.getProperty(PROPERTY_ID_TEMPLATE);
operation = Operation.fromString(properties.getProperty(PROPERTY_OPERATION, "put"));
verbose = Boolean.parseBoolean(properties.getProperty(PROPERTY_VERBOSE, "false"));
}
// NOTE(review): a stray @Override annotation preceded this static method
// (static methods cannot override); it has been removed so the class compiles.
/**
 * Create a JSON Vespa document operation given the supplied fields,
 * operation and document id template.
 *
 * @param op         Operation (put, remove, update)
 * @param docId      Document id
 * @param fields     Fields to put in document operation
 * @param properties UDF configuration controlling serialization details
 * @param schema     Pig schema for the input tuple (may be null)
 * @return A valid JSON Vespa document operation, or null for unusable input
 * @throws IOException if JSON generation fails
 */
public static String create(Operation op, String docId, Map<String, Object> fields, Properties properties,
        Schema schema) throws IOException {
    if (op == null) {
        return null;
    }
    if (docId == null || docId.length() == 0) {
        return null;
    }
    if (fields.isEmpty()) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    JsonGenerator g = new JsonFactory().createGenerator(out, JsonEncoding.UTF8);
    g.writeStartObject();
    g.writeStringField(op.toString(), docId);
    boolean createIfNonExistent = Boolean.parseBoolean(properties.getProperty(PROPERTY_CREATE_IF_NON_EXISTENT, "false"));
    if (op == Operation.UPDATE && createIfNonExistent) {
        writeField("create", true, DataType.BOOLEAN, g, properties, schema, op, 0);
    }
    String testSetConditionTemplate = properties.getProperty(TESTSET_CONDITION);
    if (testSetConditionTemplate != null) {
        String testSetCondition = TupleTools.toString(fields, testSetConditionTemplate);
        writeField(TESTSET_CONDITION, testSetCondition, DataType.CHARARRAY, g, properties, schema, op, 0);
    }
    if (op != Operation.REMOVE) {
        writeField("fields", fields, DataType.MAP, g, properties, schema, op, 0);
    }
    g.writeEndObject();
    g.close();
    // The generator emitted UTF-8 bytes; decode them explicitly as UTF-8 rather
    // than with the platform default charset (the bare out.toString() did).
    return out.toString("UTF-8");
}
/**
 * Returns the partial-update operation ("assign"/"add"/"remove") configured
 * for the given field name, or null if no configured property lists it.
 *
 * @param operationMap property label -> partial-update operation
 * @param name         field name to look up
 * @param properties   UDF configuration
 */
private static String getPartialOperation(Map<String, String> operationMap, String name, Properties properties) {
    for (Map.Entry<String, String> entry : operationMap.entrySet()) {
        String configured = properties.getProperty(entry.getKey());
        if (configured == null) {
            continue;
        }
        // Match the way every other field-list property is matched: trimmed and
        // case-insensitive. The original used a raw, case-sensitive
        // List.contains, so "foo, bar" or "FOO" silently failed to match.
        for (String field : configured.split(",")) {
            if (field.trim().equalsIgnoreCase(name)) {
                return entry.getValue();
            }
        }
    }
    return null;
}
@SuppressWarnings("unchecked")
private static void writeField(String name, Object value, Byte type, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth) throws IOException {
if (shouldWriteField(name, properties, depth)) {
String operation = getPartialOperation(mapPartialOperationMap, name, properties);
if (operation != null) {
writePartialUpdateAndRemoveMap(name, value, g, properties, schema, op, depth, operation);
} else {
g.writeFieldName(name);
if (shouldWritePartialUpdate(op, depth)) {
writePartialUpdate(value, type, g, name, properties, schema, op, depth);
} else {
writeValue(value, type, g, name, properties, schema, op, depth);
}
}
}
}
private static void writePartialUpdateAndRemoveMap(String name, Object value, JsonGenerator g, Properties properties, Schema schema, Operation op, int depth, String operation) throws IOException {
schema = (schema != null) ? schema.getField(0).schema : null;
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
DataBag bag = (DataBag) value;
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
g.writeFieldName(name + "{" + k + "}");
if (operation.equals(PARTIAL_UPDATE_REMOVE)) {
g.writeStartObject();
g.writeFieldName(PARTIAL_UPDATE_REMOVE);
g.writeNumber(0);
g.writeEndObject();
} else {
writePartialUpdate(v, t, g, name, properties, valueSchema, op, depth);
}
}
}
}
@SuppressWarnings("unchecked")
private static void writeValue(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
switch (type) {
case DataType.UNKNOWN:
break;
case DataType.NULL:
g.writeNull();
break;
case DataType.BOOLEAN:
g.writeBoolean((boolean) value);
break;
case DataType.INTEGER:
g.writeNumber((int) value);
break;
case DataType.LONG:
g.writeNumber((long) value);
break;
case DataType.FLOAT:
g.writeNumber((float) value);
break;
case DataType.DOUBLE:
g.writeNumber((double) value);
break;
case DataType.DATETIME:
g.writeNumber(((DateTime) value).getMillis());
break;
case DataType.BYTEARRAY:
DataByteArray bytes = (DataByteArray) value;
String raw = Base64.getEncoder().encodeToString(bytes.get());
g.writeString(raw);
break;
case DataType.CHARARRAY:
g.writeString((String) value);
break;
case DataType.BIGINTEGER:
g.writeNumber((BigInteger) value);
break;
case DataType.BIGDECIMAL:
g.writeNumber((BigDecimal) value);
break;
case DataType.MAP:
g.writeStartObject();
Map<Object, Object> map = (Map<Object, Object>) value;
if (shouldCreateTensor(map, name, properties)) {
if (isRemoveTensor(name, properties)) {
writeRemoveTensor(map, g);
} else {
writeTensor(map, g);
}
} else {
for (Map.Entry<Object, Object> entry : map.entrySet()) {
String k = entry.getKey().toString();
Object v = entry.getValue();
Byte t = DataType.findType(v);
Schema fieldSchema = (schema != null) ? schema.getField(k).schema : null;
writeField(k, v, t, g, properties, fieldSchema, op, depth + 1);
}
}
g.writeEndObject();
break;
case DataType.TUPLE:
Tuple tuple = (Tuple) value;
if (shouldWriteTupleAsMap(name, properties)) {
Map<String, Object> fields = TupleTools.tupleMap(schema, tuple);
writeValue(fields, DataType.MAP, g, name, properties, schema, op, depth);
} else {
boolean writeStartArray = shouldWriteTupleStart(tuple, name, properties);
if (writeStartArray) {
g.writeStartArray();
}
for (Object v : tuple) {
writeValue(v, DataType.findType(v), g, name, properties, schema, op, depth);
}
if (writeStartArray) {
g.writeEndArray();
}
}
break;
case DataType.BAG:
DataBag bag = (DataBag) value;
schema = (schema != null) ? schema.getField(0).schema : null;
if (shouldWriteBagAsMap(name, properties)) {
Schema valueSchema = (schema != null) ? schema.getField(1).schema : null;
g.writeStartObject();
for (Tuple element : bag) {
if (element.size() != 2) {
continue;
}
String k = (String) element.get(0);
Object v = element.get(1);
Byte t = DataType.findType(v);
if (t == DataType.TUPLE) {
Map<String, Object> fields = TupleTools.tupleMap(valueSchema, (Tuple) v);
writeField(k, fields, DataType.MAP, g, properties, valueSchema, op, depth + 1);
} else {
writeField(k, v, t, g, properties, valueSchema, op, depth + 1);
}
}
g.writeEndObject();
} else {
g.writeStartArray();
for (Tuple t : bag) {
writeValue(t, DataType.TUPLE, g, name, properties, schema, op, depth);
}
g.writeEndArray();
}
break;
}
}
private static boolean shouldWritePartialUpdate(Operation op, int depth) {
return op == Operation.UPDATE && depth == 1;
}
private static void writePartialUpdate(Object value, Byte type, JsonGenerator g, String name, Properties properties, Schema schema, Operation op, int depth) throws IOException {
g.writeStartObject();
String operation = getPartialOperation(partialOperationMap, name, properties);
if (operation != null) {
g.writeFieldName(operation);
} else {
g.writeFieldName(PARTIAL_UPDATE_ASSIGN);
}
writeValue(value, type, g, name, properties, schema, op, depth);
g.writeEndObject();
}
private static boolean shouldWriteTupleStart(Tuple tuple, String name, Properties properties) {
if (tuple.size() > 1 || properties == null) {
return true;
}
String simpleArrayFields = properties.getProperty(SIMPLE_ARRAY_FIELDS);
if (simpleArrayFields == null) {
return true;
}
if (simpleArrayFields.equals("*")) {
return false;
}
String[] fields = simpleArrayFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return false;
}
}
return true;
}
private static boolean shouldWriteTupleAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String addBagAsMapFields = properties.getProperty(UPDATE_MAP_FIELDS);
String simpleObjectFields = properties.getProperty(SIMPLE_OBJECT_FIELDS);
if (simpleObjectFields == null && addBagAsMapFields == null) {
return false;
}
if (addBagAsMapFields != null) {
if (addBagAsMapFields.equals("*")) {
return true;
}
String[] fields = addBagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (simpleObjectFields != null) {
if (simpleObjectFields.equals("*")) {
return true;
}
String[] fields = simpleObjectFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
private static boolean shouldWriteBagAsMap(String name, Properties properties) {
if (properties == null) {
return false;
}
String bagAsMapFields = properties.getProperty(BAG_AS_MAP_FIELDS);
if (bagAsMapFields == null) {
return false;
}
if (bagAsMapFields.equals("*")) {
return true;
}
String[] fields = bagAsMapFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
private static boolean shouldCreateTensor(Map<Object, Object> map, String name, Properties properties) {
if (properties == null) {
return false;
}
String createTensorFields = properties.getProperty(CREATE_TENSOR_FIELDS);
String addTensorFields = properties.getProperty(UPDATE_TENSOR_FIELDS);
String removeTensorFields = properties.getProperty(REMOVE_TENSOR_FIELDS);
if (createTensorFields == null && addTensorFields == null && removeTensorFields == null) {
return false;
}
String[] fields;
if (createTensorFields != null) {
fields = createTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (addTensorFields != null) {
fields = addTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
if (removeTensorFields != null) {
fields = removeTensorFields.split(",");
for (String field : fields) {
if (field.trim().equalsIgnoreCase(name)) {
return true;
}
}
}
return false;
}
/**
 * Returns whether the named field is configured for tensor-cell removal via
 * the {@code REMOVE_TENSOR_FIELDS} property. No {@code "*"} wildcard is
 * supported for this property.
 *
 * @param name       output field name, matched case-insensitively
 * @param properties UDF configuration, may be null
 * @return true if the property's comma-separated list contains the field name
 */
private static boolean isRemoveTensor(String name, Properties properties) {
    if (properties == null) {
        return false;
    }
    String configured = properties.getProperty(REMOVE_TENSOR_FIELDS);
    if (configured == null) {
        return false;
    }
    for (String candidate : configured.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns whether a field should be written at all, honoring the
 * {@code EXCLUDE_FIELDS} property. Exclusion only applies to top-level
 * fields (depth 1); nested fields are always written.
 *
 * @param name       field name, matched case-insensitively
 * @param properties UDF configuration, may be null
 * @param depth      nesting depth of the field (1 = top level)
 * @return false only when the field is top-level and explicitly excluded
 */
private static boolean shouldWriteField(String name, Properties properties, int depth) {
    if (properties == null || depth != 1) {
        return true;  // exclusion is only defined for top-level fields
    }
    String excluded = properties.getProperty(EXCLUDE_FIELDS);
    if (excluded == null) {
        return true;
    }
    for (String candidate : excluded.split(",")) {
        if (candidate.trim().equalsIgnoreCase(name)) {
            return false;
        }
    }
    return true;
}
/**
 * Writes the given map as a tensor in the document JSON "cells" format.
 * Map keys are cell addresses of the form "dim1:label1,dim2:label2,...";
 * map values must parse as doubles.
 *
 * @param map tensor cells keyed by address string
 * @param g   generator to emit JSON to
 * @throws IOException              if the generator fails
 * @throws IllegalArgumentException if an address component is not "dim:label"
 * @throws NumberFormatException    if a cell value is not a valid double
 */
private static void writeTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("cells");
    g.writeStartArray();
    for (Map.Entry<Object, Object> cell : map.entrySet()) {
        String cellAddress = cell.getKey().toString();
        // Parse the value before emitting any JSON for this cell, so a bad
        // number does not leave a half-written object behind.
        double cellValue = Double.parseDouble(cell.getValue().toString());
        g.writeStartObject();
        g.writeFieldName("address");
        g.writeStartObject();
        for (String dimension : cellAddress.split(",")) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // tolerate stray commas in the address string
            }
            String[] parts = dimension.split(":");
            if (parts.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = parts[0];
            String label = parts[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
        }
        g.writeEndObject();
        g.writeFieldName("value");
        g.writeNumber(cellValue);
        g.writeEndObject();
    }
    g.writeEndArray();
}
/**
 * Writes the given map as tensor "addresses" JSON used for removing tensor
 * cells. Each map key is a cell address of the form
 * "dim1:label1,dim2:label2,..."; one address object is written per dimension.
 * Map values are ignored — only the addresses matter for removal.
 *
 * Bug fix: empty dimensions (e.g. from a trailing comma in the address) are
 * now skipped BEFORE writeStartObject() is called. Previously the object was
 * started first and the continue skipped writeEndObject(), leaving an
 * unterminated object and producing malformed JSON.
 *
 * @param map tensor cells keyed by address string; values unused
 * @param g   generator to emit JSON to
 * @throws IOException              if the generator fails
 * @throws IllegalArgumentException if an address component is not "dim:label"
 */
private static void writeRemoveTensor(Map<Object, Object> map, JsonGenerator g) throws IOException {
    g.writeFieldName("addresses");
    g.writeStartArray();
    for (Map.Entry<Object, Object> entry : map.entrySet()) {
        String k = entry.getKey().toString();
        String[] dimensions = k.split(",");
        for (String dimension : dimensions) {
            if (dimension == null || dimension.isEmpty()) {
                continue;  // skip before starting the object so it is never left unclosed
            }
            String[] address = dimension.split(":");
            if (address.length != 2) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            String dim = address[0];
            String label = address[1];
            if (dim == null || label == null || dim.isEmpty() || label.isEmpty()) {
                throw new IllegalArgumentException("Malformed cell address: " + dimension);
            }
            g.writeStartObject();
            g.writeFieldName(dim.trim());
            g.writeString(label.trim());
            g.writeEndObject();
        }
    }
    g.writeEndArray();
}
/**
 * Returns the full stack trace of the given throwable as a string.
 *
 * StringWriter's close() is a no-op and PrintWriter's close() throws no
 * checked exception, so no IOException handling is required here; the
 * original try/catch around close was effectively unreachable.
 *
 * @param throwable the throwable to render; must not be null
 * @return the stack trace text, exactly as printStackTrace would print it
 */
private static String getStackTraceAsString(Throwable throwable) {
    StringWriter buffer = new StringWriter();
    try (PrintWriter printer = new PrintWriter(buffer, true)) {
        throwable.printStackTrace(printer);
    }
    return buffer.toString();
}
/**
 * Emits a warning both through the framework's warn mechanism and to
 * standard error, so the message is visible in the task's stderr log as
 * well as through the aggregated warning channel.
 *
 * @param msg     the warning message to emit
 * @param warning the warning category to report under
 */
private void warnLog(String msg, PigWarning warning) {
    // warn(..) is inherited from the enclosing class hierarchy (presumably
    // Pig's EvalFunc, which aggregates warnings as counters) — TODO confirm.
    warn(msg, warning);
    System.err.println(msg);
}
} |
Would be nice if we could just list relevant jobs, and maybe list all the jobs once in a list for order, if we need that. | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(Collectors.toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var jobsByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().entrySet().stream())
.collect(toUnmodifiableMap(jobs -> jobs.getKey(), jobs -> jobs.getValue()));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type())))
.forEach((instance, runs) -> {
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setLong("productionJobCount", jobsByInstance.get(instance).production().size());
instanceObject.setString("upgradePolicy", toString(deploymentStatuses.matching(status -> status.application().id().equals(TenantAndApplicationId.from(instance)))
.first().map(status -> status.application().deploymentSpec())
.flatMap(spec -> spec.instance(instance.instance()).map(DeploymentInstanceSpec::upgradePolicy))
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor allJobsObject = instanceObject.setObject("allJobs");
Cursor upgradeJobsObject = instanceObject.setObject("upgradeJobs");
for (JobType type : JobType.allIn(controller.system())) {
Cursor jobObject = allJobsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeJobsObject.setObject(type.jobName());
for (RunInfo run : runs.getOrDefault(type, List.of())) {
toSlime(jobObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
}
});
}
return new SlimeJsonResponse(slime);
} | for (JobType type : JobType.allIn(controller.system())) { | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var jobsByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().entrySet().stream())
.collect(toUnmodifiableMap(jobs -> jobs.getKey(), jobs -> jobs.getValue()));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setLong("productionJobCount", jobsByInstance.get(instance).production().size());
instanceObject.setString("upgradePolicy", toString(deploymentStatuses.matching(status -> status.application().id().equals(TenantAndApplicationId.from(instance)))
.first().map(status -> status.application().deploymentSpec())
.flatMap(spec -> spec.instance(instance.instance()).map(DeploymentInstanceSpec::upgradePolicy))
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor allJobsObject = instanceObject.setObject("allJobs");
Cursor upgradeJobsObject = instanceObject.setObject("upgradeJobs");
runs.forEach((type, rs) -> {
Cursor jobObject = allJobsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeJobsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(jobObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream().map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} |
I guess that's an option that saves some JSON over the wire :) | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(Collectors.toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var jobsByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().entrySet().stream())
.collect(toUnmodifiableMap(jobs -> jobs.getKey(), jobs -> jobs.getValue()));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type())))
.forEach((instance, runs) -> {
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setLong("productionJobCount", jobsByInstance.get(instance).production().size());
instanceObject.setString("upgradePolicy", toString(deploymentStatuses.matching(status -> status.application().id().equals(TenantAndApplicationId.from(instance)))
.first().map(status -> status.application().deploymentSpec())
.flatMap(spec -> spec.instance(instance.instance()).map(DeploymentInstanceSpec::upgradePolicy))
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor allJobsObject = instanceObject.setObject("allJobs");
Cursor upgradeJobsObject = instanceObject.setObject("upgradeJobs");
for (JobType type : JobType.allIn(controller.system())) {
Cursor jobObject = allJobsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeJobsObject.setObject(type.jobName());
for (RunInfo run : runs.getOrDefault(type, List.of())) {
toSlime(jobObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
}
});
}
return new SlimeJsonResponse(slime);
} | for (JobType type : JobType.allIn(controller.system())) { | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var jobsByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().entrySet().stream())
.collect(toUnmodifiableMap(jobs -> jobs.getKey(), jobs -> jobs.getValue()));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setLong("productionJobCount", jobsByInstance.get(instance).production().size());
instanceObject.setString("upgradePolicy", toString(deploymentStatuses.matching(status -> status.application().id().equals(TenantAndApplicationId.from(instance)))
.first().map(status -> status.application().deploymentSpec())
.flatMap(spec -> spec.instance(instance.instance()).map(DeploymentInstanceSpec::upgradePolicy))
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor allJobsObject = instanceObject.setObject("allJobs");
Cursor upgradeJobsObject = instanceObject.setObject("upgradeJobs");
runs.forEach((type, rs) -> {
Cursor jobObject = allJobsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeJobsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(jobObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream().map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} |
```suggestion assertFalse(JSON.equals("[\"a\",\"b\"]", "[\"b\",\"a\"]")); ``` | public void testEquals() {
assertTrue(JSON.equals("{}", "{}"));
assertTrue(JSON.equals("{}", "\n{ }"));
assertTrue(JSON.equals("{\"a\":0, \"c\":1}", "{\"c\":1, \"a\":0}"));
assertFalse(JSON.equals("[\"a\",\"b\"]", "[\"b\",\"c\"]"));
assertFalse(JSON.equals("{\"a\":null}", "{}"));
assertEquals(9223372036854775807L, Long.MAX_VALUE);
assertTrue(JSON.equals("{\"a\": 9223372036854775807}", "{\"a\": 9223372036854775807}"));
assertTrue(JSON.equals( "{\"a\": 1}", "{\"a\": 1}"));
assertTrue(JSON.equals( "{\"a\": 1.0}", "{\"a\": 1.0}"));
assertFalse(JSON.equals( "{\"a\": 1.0}", "{\"a\": 1}"));
assertTrue(JSON.equals( "{\"e\": 2.71828182845904}", "{\"e\": 2.71828182845904}"));
double e1 = 2.7182818284590452354;
double e2 = 2.718281828459045;
double e3 = 2.71828182845904;
assertEquals(e1, Math.E, -1);
assertEquals(e1, e2, -1);
assertNotEquals(e1, e3, -1);
assertTrue(JSON.equals( "{\"a\": 92233720368547758070}",
"{\"a\": 92233720368547758070}"));
assertFalse(JSON.equals("{\"a\": 92233720368547758070}",
"{\"a\": 92233720368547758071}"));
assertTrue(JSON.equals( "{\"e\": 2.7182818284590452354}",
"{\"e\": 2.7182818284590452354}"));
assertTrue(JSON.equals( "{\"e\": 2.7182818284590452354}",
"{\"e\": 2.7182818284590452355}"));
assertFalse(JSON.equals("{\"e\": 2.7182818284590452354}",
"{\"e\": 2.71828182845904}"));
assertFalse(JSON.equals("{\"a\": 1.0}", "{\"a\":1}"));
assertTrue(JSON.equals("{\"a\": 1.0}", "{\"a\":1.00}"));
assertTrue(JSON.equals("{\"a\": 1.0}", "{\"a\":1.0000000000000000000000000000}"));
assertTrue(JSON.equals("{\"a\": 10.0}", "{\"a\":1e1}"));
assertTrue(JSON.equals("{\"a\": 1.2}", "{\"a\":12e-1}"));
} | assertFalse(JSON.equals("[\"a\",\"b\"]", "[\"b\",\"c\"]")); | public void testEquals() {
assertTrue(JSON.equals("{}", "{}"));
assertTrue(JSON.equals("{}", "\n{ }"));
assertTrue(JSON.equals("{\"a\":0, \"c\":1}", "{\"c\":1, \"a\":0}"));
assertFalse(JSON.equals("{\"a\":0}", "{\"a\":0, \"b\":0}"));
assertFalse(JSON.equals("{\"a\":0, \"b\":0}", "{\"a\":0}"));
assertFalse(JSON.equals("[\"a\",\"b\"]", "[\"b\",\"a\"]"));
assertFalse(JSON.equals("{\"a\":null}", "{}"));
assertEquals(9223372036854775807L, Long.MAX_VALUE);
assertTrue(JSON.equals("{\"a\": 9223372036854775807}", "{\"a\": 9223372036854775807}"));
assertTrue(JSON.equals("{\"a\": 1}", "{\"a\": 1}"));
assertTrue(JSON.equals("{\"a\": 1.0}", "{\"a\": 1.0}"));
assertFalse(JSON.equals("{\"a\": 1.0}", "{\"a\": 1}"));
assertTrue(JSON.equals("{\"e\": 2.71828182845904}", "{\"e\": 2.71828182845904}"));
assertTrue(JSON.equals("{\"e\": 1.7976931348623156e+308}", "{\"e\": 1.7976931348623156e+308}"));
double e1 = 2.7182818284590452354;
double e2 = 2.718281828459045;
double e3 = 2.71828182845904;
assertEquals(e1, Math.E, -1);
assertEquals(e1, e2, -1);
assertNotEquals(e1, e3, -1);
assertRuntimeException(() -> JSON.equals("", "{}"));
assertRuntimeException(() -> JSON.equals("{}", ""));
assertRuntimeException(() -> JSON.equals("{", "{}"));
assertRuntimeException(() -> JSON.equals("{}", "{"));
}
@Test
public void implementationSpecificEqualsBehavior() {
assertTrue( JSON.equals("{\"a\": 9223372036854775807}", "{\"a\": 9223372036854775807}"));
assertRuntimeException(() -> JSON.equals("{\"a\": 9223372036854775808}", "{\"a\": 9223372036854775808}"));
assertTrue(JSON.equals("{\"a\": 2.7976931348623158e+308}", "{\"a\": 2.7976931348623158e+308}"));
assertTrue(JSON.equals( "{\"e\": 2.7182818284590452354}",
"{\"e\": 2.7182818284590452354}"));
assertTrue(JSON.equals( "{\"e\": 2.7182818284590452354}",
"{\"e\": 2.7182818284590452355}"));
assertFalse(JSON.equals("{\"e\": 2.7182818284590452354}",
"{\"e\": 2.71828182845904}"));
assertFalse(JSON.equals("{\"a\": 1.0}", "{\"a\":1}"));
assertTrue(JSON.equals("{\"a\": 1.0}", "{\"a\":1.00}"));
assertTrue(JSON.equals("{\"a\": 1.0}", "{\"a\":1.0000000000000000000000000000}"));
assertTrue(JSON.equals("{\"a\": 10.0}", "{\"a\":1e1}"));
assertTrue(JSON.equals("{\"a\": 1.2}", "{\"a\":12e-1}"));
}
private static void assertRuntimeException(Runnable runnable) {
try {
runnable.run();
fail("Expected RuntimeException to be thrown, but no exception was thrown");
} catch (RuntimeException e) {
}
}
} | class JSONTest {
@Test
public void testMapToString() {
Map<String,Object> map = new LinkedHashMap<>();
map.put("a \"key\"", 3);
map.put("key2", "value");
map.put("key3", 3.3);
assertEquals("{\"a \\\"key\\\"\":3,\"key2\":\"value\",\"key3\":3.3}", JSON.encode(map));
}
@Test
} | class JSONTest {
@Test
public void testMapToString() {
Map<String,Object> map = new LinkedHashMap<>();
map.put("a \"key\"", 3);
map.put("key2", "value");
map.put("key3", 3.3);
assertEquals("{\"a \\\"key\\\"\":3,\"key2\":\"value\",\"key3\":3.3}", JSON.encode(map));
}
@Test |
Consider moving this duplicated sub-expression into a method | protected static ClusterDef resolveClusterDef(Optional<String> wantedCluster, List<ClusterDef> clusters) throws RestApiException {
if (clusters.size() == 0) {
throw new IllegalArgumentException("Your Vespa cluster does not have any content clusters " +
"declared. Visiting feature is not available.");
}
if (! wantedCluster.isPresent()) {
if (clusters.size() != 1) {
String message = "Several clusters exist: " +
clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
". You must specify one.";
throw new RestApiException(Response.createErrorResponse(400,
message,
RestUri.apiErrorCodes.SEVERAL_CLUSTERS));
}
return clusters.get(0);
}
for (ClusterDef clusterDef : clusters) {
if (clusterDef.getName().equals(wantedCluster.get())) {
return clusterDef;
}
}
String message = "Your vespa cluster contains the content clusters " +
clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
", not '" + wantedCluster.get() + "'. Please select a valid vespa cluster.";
throw new RestApiException(Response.createErrorResponse(400,
message,
RestUri.apiErrorCodes.MISSING_CLUSTER));
} | clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) + | protected static ClusterDef resolveClusterDef(Optional<String> wantedCluster, List<ClusterDef> clusters) throws RestApiException {
if (clusters.size() == 0) {
throw new IllegalArgumentException("Your Vespa cluster does not have any content clusters " +
"declared. Visiting feature is not available.");
}
if (! wantedCluster.isPresent()) {
if (clusters.size() != 1) {
String message = "Several clusters exist: " +
clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
". You must specify one.";
throw new RestApiException(Response.createErrorResponse(400,
message,
RestUri.apiErrorCodes.SEVERAL_CLUSTERS));
}
return clusters.get(0);
}
for (ClusterDef clusterDef : clusters) {
if (clusterDef.getName().equals(wantedCluster.get())) {
return clusterDef;
}
}
String message = "Your vespa cluster contains the content clusters " +
clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
", not '" + wantedCluster.get() + "'. Please select a valid vespa cluster.";
throw new RestApiException(Response.createErrorResponse(400,
message,
RestUri.apiErrorCodes.MISSING_CLUSTER));
} | class SyncSessionFactory extends ResourceFactory<SyncSession> {
private final DocumentAccess documentAccess;
SyncSessionFactory(DocumentAccess documentAccess) {
this.documentAccess = documentAccess;
}
@Override
public SyncSession create() {
return documentAccess.createSyncSession(new SyncParameters.Builder().build());
}
} | class SyncSessionFactory extends ResourceFactory<SyncSession> {
private final DocumentAccess documentAccess;
SyncSessionFactory(DocumentAccess documentAccess) {
this.documentAccess = documentAccess;
}
@Override
public SyncSession create() {
return documentAccess.createSyncSession(new SyncParameters.Builder().build());
}
} |
I think a quick comment on the significance of the number 5 would be nice here | public IdIdString(String namespace, String type, String keyValues, String localId) {
super(Scheme.id, namespace, localId);
this.type = type;
boolean hasSetLocation = false;
if (namespace.length() + type.length() + keyValues.length() + 5 >= IdString.MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) {
throw new IllegalArgumentException("Length of namespace(" + namespace.length() + ") + doctype(" + type.length() +
") + key/values(" + keyValues.length() +"), is longer than " + (MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC - 5));
}
for(String pair : keyValues.split(",")) {
int pos = pair.indexOf('=');
if (pos == -1) {
if (pair.equals("")) {
continue;
}
throw new IllegalArgumentException("Illegal key-value pair '" + pair + "'");
}
String key = pair.substring(0, pos);
String value = pair.substring(pos + 1);
switch(key) {
case "n":
if (hasSetLocation) {
throw new IllegalArgumentException("Illegal key combination in " + keyValues);
}
if (value.isEmpty()) {
throw new IllegalArgumentException("ID location value for 'n=' key is empty");
}
location = Long.parseLong(value);
hasSetLocation = true;
hasNumber = true;
break;
case "g":
if (hasSetLocation) {
throw new IllegalArgumentException("Illegal key combination in " + keyValues);
}
if (value.isEmpty()) {
throw new IllegalArgumentException("ID location value for 'g=' key is empty");
}
location = makeLocation(value);
hasSetLocation = true;
hasGroup = true;
group = value;
break;
default:
throw new IllegalArgumentException("Illegal key '" + key + "'");
}
}
if (!hasSetLocation) {
location = makeLocation(localId);
}
} | if (namespace.length() + type.length() + keyValues.length() + 5 >= IdString.MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) { | public IdIdString(String namespace, String type, String keyValues, String localId) {
super(Scheme.id, namespace, localId);
this.type = type;
boolean hasSetLocation = false;
if (namespace.length() + type.length() + keyValues.length() + 5 >= IdString.MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) {
throw new IllegalArgumentException("Length of namespace(" + namespace.length() + ") + doctype(" + type.length() +
") + key/values(" + keyValues.length() +"), is longer than " + (MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC - 5));
}
for(String pair : keyValues.split(",")) {
int pos = pair.indexOf('=');
if (pos == -1) {
if (pair.equals("")) {
continue;
}
throw new IllegalArgumentException("Illegal key-value pair '" + pair + "'");
}
String key = pair.substring(0, pos);
String value = pair.substring(pos + 1);
switch(key) {
case "n":
if (hasSetLocation) {
throw new IllegalArgumentException("Illegal key combination in " + keyValues);
}
if (value.isEmpty()) {
throw new IllegalArgumentException("ID location value for 'n=' key is empty");
}
location = Long.parseLong(value);
hasSetLocation = true;
hasNumber = true;
break;
case "g":
if (hasSetLocation) {
throw new IllegalArgumentException("Illegal key combination in " + keyValues);
}
if (value.isEmpty()) {
throw new IllegalArgumentException("ID location value for 'g=' key is empty");
}
location = makeLocation(value);
hasSetLocation = true;
hasGroup = true;
group = value;
break;
default:
throw new IllegalArgumentException("Illegal key '" + key + "'");
}
}
if (!hasSetLocation) {
location = makeLocation(localId);
}
} | class IdIdString extends IdString {
private String type;
private String group;
private long location;
private boolean hasGroup;
private boolean hasNumber;
public static String replaceType(String id, String typeName) {
int typeStartPos = id.indexOf(":", 3) + 1;
int typeEndPos = id.indexOf(":", typeStartPos);
return id.substring(0, typeStartPos) + typeName + id.substring(typeEndPos);
}
public static long makeLocation(String s) {
long result = 0;
byte[] md5sum = MD5.md5.get().digest(Utf8.toBytes(s));
for (int i=0; i<8; ++i) {
result |= (md5sum[i] & 0xFFl) << (8*i);
}
return result;
}
/**
* Create an id scheme object.
* <code>doc:<namespace>:<documentType>:<key-value-pairs>:<namespaceSpecific></code>
*
* @param namespace The namespace of this document id.
* @param type The type of this document id.
* @param keyValues The key/value pairs of this document id.
* @param localId The namespace specific part.
*/
@Override
public long getLocation() {
return location;
}
@Override
public String getSchemeSpecific() {
if (hasGroup) {
return type + ":g=" + group + ":";
} else if (hasNumber) {
return type + ":n=" + location + ":";
} else {
return type + "::";
}
}
@Override
public boolean hasDocType() {
return true;
}
@Override
public String getDocType() {
return type;
}
@Override
public boolean hasGroup() {
return hasGroup;
}
@Override
public String getGroup() {
return group;
}
@Override
public boolean hasNumber() {
return hasNumber;
}
@Override
public long getNumber() {
return location;
}
} | class IdIdString extends IdString {
private String type;
private String group;
private long location;
private boolean hasGroup;
private boolean hasNumber;
public static String replaceType(String id, String typeName) {
int typeStartPos = id.indexOf(":", 3) + 1;
int typeEndPos = id.indexOf(":", typeStartPos);
return id.substring(0, typeStartPos) + typeName + id.substring(typeEndPos);
}
public static long makeLocation(String s) {
long result = 0;
byte[] md5sum = MD5.md5.get().digest(Utf8.toBytes(s));
for (int i=0; i<8; ++i) {
result |= (md5sum[i] & 0xFFl) << (8*i);
}
return result;
}
/**
* Create an id scheme object.
* <code>doc:<namespace>:<documentType>:<key-value-pairs>:<namespaceSpecific></code>
*
* @param namespace The namespace of this document id.
* @param type The type of this document id.
* @param keyValues The key/value pairs of this document id.
* @param localId The namespace specific part.
*/
@Override
public long getLocation() {
return location;
}
@Override
public String getSchemeSpecific() {
if (hasGroup) {
return type + ":g=" + group + ":";
} else if (hasNumber) {
return type + ":n=" + location + ":";
} else {
return type + "::";
}
}
@Override
public boolean hasDocType() {
return true;
}
@Override
public String getDocType() {
return type;
}
@Override
public boolean hasGroup() {
return hasGroup;
}
@Override
public String getGroup() {
return group;
}
@Override
public boolean hasNumber() {
return hasNumber;
}
@Override
public long getNumber() {
return location;
}
} |
Same, why not move this into the estimator. | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return ((hitEstimator == null) || (numPartitions <= 1))
? wantedHits
: hitEstimator.estimateK(wantedHits, numPartitions);
} | return ((hitEstimator == null) || (numPartitions <= 1)) | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return hitEstimator.estimateK(wantedHits, numPartitions);
} | class TopKEstimator {
private final TDistribution studentT;
private final double p;
TopKEstimator(double freedom, double wantedprobability) {
this.studentT = new TDistribution(null, freedom);
p = wantedprobability;
}
double estimateExactK(double k, double n) {
double variance = k * 1/n * (1 - 1/n);
double p_inverse = 1 - (1 - p)/n;
return k/n + studentT.inverseCumulativeProbability(p_inverse) * Math.sqrt(variance);
}
int estimateK(double k, double n) {
return (int)Math.ceil(estimateExactK(k, n));
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final DispatchConfig dispatchConfig;
private final int size;
private final String clusterId;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
private final ImmutableList<Group> orderedGroups;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final TopKEstimator hitEstimator;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Optional<Node> localCorpusDispatchTarget;
public SearchCluster(String clusterId, DispatchConfig dispatchConfig, int containerClusterSize,
VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.dispatchConfig = dispatchConfig;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
List<Node> nodes = toNodes(dispatchConfig);
this.size = nodes.size();
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
this.groups = groupsBuilder.build();
LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>();
nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group())));
this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build();
ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>();
for (Node node : nodes)
nodesByHostBuilder.put(node.hostname(), node);
this.nodesByHost = nodesByHostBuilder.build();
hitEstimator = new TopKEstimator(30.0, dispatchConfig.topKProbability());
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(),
size,
containerClusterSize,
nodesByHost,
groups);
}
/* Testing only */
public SearchCluster(String clusterId, DispatchConfig dispatchConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, dispatchConfig, 1, vipStatus, pingFactory);
}
public void addMonitoring(ClusterMonitor clusterMonitor) {
for (var group : orderedGroups) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Optional<Node> findLocalCorpusDispatchTarget(String selfHostname,
int searchClusterSize,
int containerClusterSize,
ImmutableMultimap<String, Node> nodesByHost,
ImmutableMap<Integer, Group> groups) {
ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname);
if (localSearchNodes.size() != 1) return Optional.empty();
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return Optional.empty();
if (containerClusterSize < searchClusterSize) return Optional.empty();
return Optional.of(localSearchNode);
}
private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) {
ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>();
for (DispatchConfig.Node node : dispatchConfig.node())
nodesBuilder.add(new Node(node.key(), node.host(), node.group()));
return nodesBuilder.build();
}
public DispatchConfig dispatchConfig() {
return dispatchConfig;
}
/** Returns the number of nodes in this cluster (across all groups) */
public int size() { return size; }
/** Returns the groups of this cluster as an immutable map indexed by group id */
public ImmutableMap<Integer, Group> groups() { return groups; }
/** Returns the groups of this cluster as an immutable list in introduction order */
public ImmutableList<Group> orderedGroups() { return orderedGroups; }
/** Returns the n'th (zero-indexed) group in the cluster if possible */
public Optional<Group> group(int n) {
if (orderedGroups.size() > n) {
return Optional.of(orderedGroups.get(n));
} else {
return Optional.empty();
}
}
/** Returns the number of nodes per group - size()/groups.size() */
public int groupSize() {
if (groups.size() == 0) return size();
return size() / groups.size();
}
public int groupsWithSufficientCoverage() {
int covered = 0;
for (Group g : orderedGroups) {
if (g.hasSufficientCoverage()) {
covered++;
}
}
return covered;
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget.isEmpty()) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.get().group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.get().isWorking() == Boolean.FALSE) return Optional.empty();
return localCorpusDispatchTarget;
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget.isEmpty()) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || size() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget.isEmpty()) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public int estimateHitsToFetch(int wantedHits, int numPartitions, double topKProbability) {
return hitEstimator.estimateK(wantedHits, numPartitions, topKProbability);
}
public boolean hasInformationAboutAllNodes() {
return nodesByHost.values().stream().allMatch(node -> node.isWorking() != null);
}
private boolean hasWorkingNodes() {
return nodesByHost.values().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE );
}
private boolean usesLocalCorpusIn(Node node) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().equals(node);
}
private boolean usesLocalCorpusIn(Group group) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().group() == group.id();
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
}
clusterMonitor.responded(node);
}
}
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups.values().iterator().next();
group.aggregateActiveDocuments();
updateSufficientCoverage(group, true);
boolean fullCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), group.getActiveDocuments(),
group.getActiveDocuments());
trackGroupCoverageChanges(0, group, fullCoverage, group.getActiveDocuments());
}
private void pingIterationCompletedMultipleGroups() {
int numGroups = orderedGroups.size();
long[] activeDocumentsInGroup = new long[numGroups];
long sumOfActiveDocuments = 0;
for(int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
group.aggregateActiveDocuments();
activeDocumentsInGroup[i] = group.getActiveDocuments();
sumOfActiveDocuments += activeDocumentsInGroup[i];
}
boolean anyGroupsSufficientCoverage = false;
for (int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
long activeDocuments = activeDocumentsInGroup[i];
long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1);
boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), activeDocuments, averageDocumentsInOtherGroups);
anyGroupsSufficientCoverage = anyGroupsSufficientCoverage || sufficientCoverage;
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(i, group, sufficientCoverage, averageDocumentsInOtherGroups);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
int numGroups = orderedGroups.size();
if (numGroups == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
private boolean isGroupCoverageSufficient(int workingNodes, int nodesInGroup, long activeDocuments, long averageDocumentsInOtherGroups) {
boolean sufficientCoverage = true;
if (averageDocumentsInOtherGroups > 0) {
double coverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups;
sufficientCoverage = coverage >= dispatchConfig.minActivedocsPercentage();
}
if (sufficientCoverage) {
sufficientCoverage = isGroupNodeCoverageSufficient(workingNodes, nodesInGroup);
}
return sufficientCoverage;
}
private boolean isGroupNodeCoverageSufficient(int workingNodes, int nodesInGroup) {
int nodesAllowedDown = dispatchConfig.maxNodesDownPerGroup()
+ (int) (((double) nodesInGroup * (100.0 - dispatchConfig.minGroupCoverage())) / 100.0);
return workingNodes + nodesAllowedDown >= nodesInGroup;
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
public boolean isPartialGroupCoverageSufficient(OptionalInt knownGroupId, List<Node> nodes) {
if (orderedGroups.size() == 1) {
boolean sufficient = nodes.size() >= groupSize() - dispatchConfig.maxNodesDownPerGroup();
return sufficient;
}
if (knownGroupId.isEmpty()) {
return false;
}
int groupId = knownGroupId.getAsInt();
Group group = groups.get(groupId);
if (group == null) {
return false;
}
int nodesInGroup = group.nodes().size();
long sumOfActiveDocuments = 0;
int otherGroups = 0;
for (Group g : orderedGroups) {
if (g.id() != groupId) {
sumOfActiveDocuments += g.getActiveDocuments();
otherGroups++;
}
}
long activeDocuments = 0;
for (Node n : nodes) {
activeDocuments += n.getActiveDocuments();
}
long averageDocumentsInOtherGroups = sumOfActiveDocuments / otherGroups;
return isGroupCoverageSufficient(nodes.size(), nodesInGroup, activeDocuments, averageDocumentsInOtherGroups);
}
private void trackGroupCoverageChanges(int index, Group group, boolean fullCoverage, long averageDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
int requiredNodes = groupSize() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info(() -> String.format("Group %d is now good again (%d/%d active docs, coverage %d/%d)",
index, group.getActiveDocuments(), averageDocuments, group.workingNodes(), groupSize()));
} else {
StringBuilder missing = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE) {
missing.append('\n').append(node.toString());
}
}
log.warning(() -> String.format("Coverage of group %d is only %d/%d (requires %d) (%d/%d active docs) Failed nodes are:%s",
index, group.workingNodes(), groupSize(), requiredNodes, group.getActiveDocuments(), averageDocuments, missing.toString()));
}
}
}
} |
Might be quite useful to control this by query? How much you care can depend on use case ... | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return ((hitEstimator == null) || (numPartitions <= 1))
? wantedHits
: hitEstimator.estimateK(wantedHits, numPartitions);
} | return ((hitEstimator == null) || (numPartitions <= 1)) | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return hitEstimator.estimateK(wantedHits, numPartitions);
} | class TopKEstimator {
private final TDistribution studentT;
private final double p;
TopKEstimator(double freedom, double wantedprobability) {
this.studentT = new TDistribution(null, freedom);
p = wantedprobability;
}
double estimateExactK(double k, double n) {
double variance = k * 1/n * (1 - 1/n);
double p_inverse = 1 - (1 - p)/n;
return k/n + studentT.inverseCumulativeProbability(p_inverse) * Math.sqrt(variance);
}
int estimateK(double k, double n) {
return (int)Math.ceil(estimateExactK(k, n));
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final DispatchConfig dispatchConfig;
private final int size;
private final String clusterId;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
private final ImmutableList<Group> orderedGroups;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final TopKEstimator hitEstimator;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Optional<Node> localCorpusDispatchTarget;
public SearchCluster(String clusterId, DispatchConfig dispatchConfig, int containerClusterSize,
VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.dispatchConfig = dispatchConfig;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
List<Node> nodes = toNodes(dispatchConfig);
this.size = nodes.size();
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
this.groups = groupsBuilder.build();
LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>();
nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group())));
this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build();
ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>();
for (Node node : nodes)
nodesByHostBuilder.put(node.hostname(), node);
this.nodesByHost = nodesByHostBuilder.build();
hitEstimator = new TopKEstimator(30.0, dispatchConfig.topKProbability());
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(),
size,
containerClusterSize,
nodesByHost,
groups);
}
/* Testing only */
public SearchCluster(String clusterId, DispatchConfig dispatchConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, dispatchConfig, 1, vipStatus, pingFactory);
}
public void addMonitoring(ClusterMonitor clusterMonitor) {
for (var group : orderedGroups) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Optional<Node> findLocalCorpusDispatchTarget(String selfHostname,
int searchClusterSize,
int containerClusterSize,
ImmutableMultimap<String, Node> nodesByHost,
ImmutableMap<Integer, Group> groups) {
ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname);
if (localSearchNodes.size() != 1) return Optional.empty();
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return Optional.empty();
if (containerClusterSize < searchClusterSize) return Optional.empty();
return Optional.of(localSearchNode);
}
private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) {
ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>();
for (DispatchConfig.Node node : dispatchConfig.node())
nodesBuilder.add(new Node(node.key(), node.host(), node.group()));
return nodesBuilder.build();
}
public DispatchConfig dispatchConfig() {
return dispatchConfig;
}
/** Returns the number of nodes in this cluster (across all groups) */
public int size() { return size; }
/** Returns the groups of this cluster as an immutable map indexed by group id */
public ImmutableMap<Integer, Group> groups() { return groups; }
/** Returns the groups of this cluster as an immutable list in introduction order */
public ImmutableList<Group> orderedGroups() { return orderedGroups; }
/** Returns the n'th (zero-indexed) group in the cluster if possible */
public Optional<Group> group(int n) {
if (orderedGroups.size() > n) {
return Optional.of(orderedGroups.get(n));
} else {
return Optional.empty();
}
}
/** Returns the number of nodes per group - size()/groups.size() */
public int groupSize() {
if (groups.size() == 0) return size();
return size() / groups.size();
}
public int groupsWithSufficientCoverage() {
int covered = 0;
for (Group g : orderedGroups) {
if (g.hasSufficientCoverage()) {
covered++;
}
}
return covered;
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget.isEmpty()) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.get().group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.get().isWorking() == Boolean.FALSE) return Optional.empty();
return localCorpusDispatchTarget;
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget.isEmpty()) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || size() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget.isEmpty()) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public int estimateHitsToFetch(int wantedHits, int numPartitions, double topKProbability) {
return hitEstimator.estimateK(wantedHits, numPartitions, topKProbability);
}
public boolean hasInformationAboutAllNodes() {
return nodesByHost.values().stream().allMatch(node -> node.isWorking() != null);
}
private boolean hasWorkingNodes() {
return nodesByHost.values().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE );
}
private boolean usesLocalCorpusIn(Node node) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().equals(node);
}
private boolean usesLocalCorpusIn(Group group) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().group() == group.id();
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
}
clusterMonitor.responded(node);
}
}
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups.values().iterator().next();
group.aggregateActiveDocuments();
updateSufficientCoverage(group, true);
boolean fullCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), group.getActiveDocuments(),
group.getActiveDocuments());
trackGroupCoverageChanges(0, group, fullCoverage, group.getActiveDocuments());
}
private void pingIterationCompletedMultipleGroups() {
int numGroups = orderedGroups.size();
long[] activeDocumentsInGroup = new long[numGroups];
long sumOfActiveDocuments = 0;
for(int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
group.aggregateActiveDocuments();
activeDocumentsInGroup[i] = group.getActiveDocuments();
sumOfActiveDocuments += activeDocumentsInGroup[i];
}
boolean anyGroupsSufficientCoverage = false;
for (int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
long activeDocuments = activeDocumentsInGroup[i];
long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1);
boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), activeDocuments, averageDocumentsInOtherGroups);
anyGroupsSufficientCoverage = anyGroupsSufficientCoverage || sufficientCoverage;
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(i, group, sufficientCoverage, averageDocumentsInOtherGroups);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
int numGroups = orderedGroups.size();
if (numGroups == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
private boolean isGroupCoverageSufficient(int workingNodes, int nodesInGroup, long activeDocuments, long averageDocumentsInOtherGroups) {
boolean sufficientCoverage = true;
if (averageDocumentsInOtherGroups > 0) {
double coverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups;
sufficientCoverage = coverage >= dispatchConfig.minActivedocsPercentage();
}
if (sufficientCoverage) {
sufficientCoverage = isGroupNodeCoverageSufficient(workingNodes, nodesInGroup);
}
return sufficientCoverage;
}
private boolean isGroupNodeCoverageSufficient(int workingNodes, int nodesInGroup) {
int nodesAllowedDown = dispatchConfig.maxNodesDownPerGroup()
+ (int) (((double) nodesInGroup * (100.0 - dispatchConfig.minGroupCoverage())) / 100.0);
return workingNodes + nodesAllowedDown >= nodesInGroup;
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
public boolean isPartialGroupCoverageSufficient(OptionalInt knownGroupId, List<Node> nodes) {
if (orderedGroups.size() == 1) {
boolean sufficient = nodes.size() >= groupSize() - dispatchConfig.maxNodesDownPerGroup();
return sufficient;
}
if (knownGroupId.isEmpty()) {
return false;
}
int groupId = knownGroupId.getAsInt();
Group group = groups.get(groupId);
if (group == null) {
return false;
}
int nodesInGroup = group.nodes().size();
long sumOfActiveDocuments = 0;
int otherGroups = 0;
for (Group g : orderedGroups) {
if (g.id() != groupId) {
sumOfActiveDocuments += g.getActiveDocuments();
otherGroups++;
}
}
long activeDocuments = 0;
for (Node n : nodes) {
activeDocuments += n.getActiveDocuments();
}
long averageDocumentsInOtherGroups = sumOfActiveDocuments / otherGroups;
return isGroupCoverageSufficient(nodes.size(), nodesInGroup, activeDocuments, averageDocumentsInOtherGroups);
}
private void trackGroupCoverageChanges(int index, Group group, boolean fullCoverage, long averageDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
int requiredNodes = groupSize() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info(() -> String.format("Group %d is now good again (%d/%d active docs, coverage %d/%d)",
index, group.getActiveDocuments(), averageDocuments, group.workingNodes(), groupSize()));
} else {
StringBuilder missing = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE) {
missing.append('\n').append(node.toString());
}
}
log.warning(() -> String.format("Coverage of group %d is only %d/%d (requires %d) (%d/%d active docs) Failed nodes are:%s",
index, group.workingNodes(), groupSize(), requiredNodes, group.getActiveDocuments(), averageDocuments, missing.toString()));
}
}
}
} |
Moved. Will add query too. | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return ((hitEstimator == null) || (numPartitions <= 1))
? wantedHits
: hitEstimator.estimateK(wantedHits, numPartitions);
} | return ((hitEstimator == null) || (numPartitions <= 1)) | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return hitEstimator.estimateK(wantedHits, numPartitions);
} | class TopKEstimator {
private final TDistribution studentT;
private final double p;
TopKEstimator(double freedom, double wantedprobability) {
this.studentT = new TDistribution(null, freedom);
p = wantedprobability;
}
double estimateExactK(double k, double n) {
double variance = k * 1/n * (1 - 1/n);
double p_inverse = 1 - (1 - p)/n;
return k/n + studentT.inverseCumulativeProbability(p_inverse) * Math.sqrt(variance);
}
int estimateK(double k, double n) {
return (int)Math.ceil(estimateExactK(k, n));
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final DispatchConfig dispatchConfig;
private final int size;
private final String clusterId;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
private final ImmutableList<Group> orderedGroups;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final TopKEstimator hitEstimator;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Optional<Node> localCorpusDispatchTarget;
public SearchCluster(String clusterId, DispatchConfig dispatchConfig, int containerClusterSize,
VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.dispatchConfig = dispatchConfig;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
List<Node> nodes = toNodes(dispatchConfig);
this.size = nodes.size();
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
this.groups = groupsBuilder.build();
LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>();
nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group())));
this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build();
ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>();
for (Node node : nodes)
nodesByHostBuilder.put(node.hostname(), node);
this.nodesByHost = nodesByHostBuilder.build();
hitEstimator = new TopKEstimator(30.0, dispatchConfig.topKProbability());
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(),
size,
containerClusterSize,
nodesByHost,
groups);
}
/* Testing only */
public SearchCluster(String clusterId, DispatchConfig dispatchConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, dispatchConfig, 1, vipStatus, pingFactory);
}
public void addMonitoring(ClusterMonitor clusterMonitor) {
for (var group : orderedGroups) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Optional<Node> findLocalCorpusDispatchTarget(String selfHostname,
int searchClusterSize,
int containerClusterSize,
ImmutableMultimap<String, Node> nodesByHost,
ImmutableMap<Integer, Group> groups) {
ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname);
if (localSearchNodes.size() != 1) return Optional.empty();
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return Optional.empty();
if (containerClusterSize < searchClusterSize) return Optional.empty();
return Optional.of(localSearchNode);
}
private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) {
ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>();
for (DispatchConfig.Node node : dispatchConfig.node())
nodesBuilder.add(new Node(node.key(), node.host(), node.group()));
return nodesBuilder.build();
}
public DispatchConfig dispatchConfig() {
return dispatchConfig;
}
/** Returns the number of nodes in this cluster (across all groups) */
public int size() { return size; }
/** Returns the groups of this cluster as an immutable map indexed by group id */
public ImmutableMap<Integer, Group> groups() { return groups; }
/** Returns the groups of this cluster as an immutable list in introduction order */
public ImmutableList<Group> orderedGroups() { return orderedGroups; }
/** Returns the n'th (zero-indexed) group in the cluster if possible */
public Optional<Group> group(int n) {
if (orderedGroups.size() > n) {
return Optional.of(orderedGroups.get(n));
} else {
return Optional.empty();
}
}
/** Returns the number of nodes per group - size()/groups.size() */
public int groupSize() {
if (groups.size() == 0) return size();
return size() / groups.size();
}
public int groupsWithSufficientCoverage() {
int covered = 0;
for (Group g : orderedGroups) {
if (g.hasSufficientCoverage()) {
covered++;
}
}
return covered;
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget.isEmpty()) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.get().group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.get().isWorking() == Boolean.FALSE) return Optional.empty();
return localCorpusDispatchTarget;
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget.isEmpty()) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || size() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget.isEmpty()) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public int estimateHitsToFetch(int wantedHits, int numPartitions, double topKProbability) {
return hitEstimator.estimateK(wantedHits, numPartitions, topKProbability);
}
public boolean hasInformationAboutAllNodes() {
return nodesByHost.values().stream().allMatch(node -> node.isWorking() != null);
}
private boolean hasWorkingNodes() {
return nodesByHost.values().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE );
}
private boolean usesLocalCorpusIn(Node node) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().equals(node);
}
private boolean usesLocalCorpusIn(Group group) {
return localCorpusDispatchTarget.isPresent() && localCorpusDispatchTarget.get().group() == group.id();
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
}
clusterMonitor.responded(node);
}
}
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups.values().iterator().next();
group.aggregateActiveDocuments();
updateSufficientCoverage(group, true);
boolean fullCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), group.getActiveDocuments(),
group.getActiveDocuments());
trackGroupCoverageChanges(0, group, fullCoverage, group.getActiveDocuments());
}
private void pingIterationCompletedMultipleGroups() {
int numGroups = orderedGroups.size();
long[] activeDocumentsInGroup = new long[numGroups];
long sumOfActiveDocuments = 0;
for(int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
group.aggregateActiveDocuments();
activeDocumentsInGroup[i] = group.getActiveDocuments();
sumOfActiveDocuments += activeDocumentsInGroup[i];
}
boolean anyGroupsSufficientCoverage = false;
for (int i = 0; i < numGroups; i++) {
Group group = orderedGroups.get(i);
long activeDocuments = activeDocumentsInGroup[i];
long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1);
boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), activeDocuments, averageDocumentsInOtherGroups);
anyGroupsSufficientCoverage = anyGroupsSufficientCoverage || sufficientCoverage;
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(i, group, sufficientCoverage, averageDocumentsInOtherGroups);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
int numGroups = orderedGroups.size();
if (numGroups == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
private boolean isGroupCoverageSufficient(int workingNodes, int nodesInGroup, long activeDocuments, long averageDocumentsInOtherGroups) {
boolean sufficientCoverage = true;
if (averageDocumentsInOtherGroups > 0) {
double coverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups;
sufficientCoverage = coverage >= dispatchConfig.minActivedocsPercentage();
}
if (sufficientCoverage) {
sufficientCoverage = isGroupNodeCoverageSufficient(workingNodes, nodesInGroup);
}
return sufficientCoverage;
}
private boolean isGroupNodeCoverageSufficient(int workingNodes, int nodesInGroup) {
int nodesAllowedDown = dispatchConfig.maxNodesDownPerGroup()
+ (int) (((double) nodesInGroup * (100.0 - dispatchConfig.minGroupCoverage())) / 100.0);
return workingNodes + nodesAllowedDown >= nodesInGroup;
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
public boolean isPartialGroupCoverageSufficient(OptionalInt knownGroupId, List<Node> nodes) {
if (orderedGroups.size() == 1) {
boolean sufficient = nodes.size() >= groupSize() - dispatchConfig.maxNodesDownPerGroup();
return sufficient;
}
if (knownGroupId.isEmpty()) {
return false;
}
int groupId = knownGroupId.getAsInt();
Group group = groups.get(groupId);
if (group == null) {
return false;
}
int nodesInGroup = group.nodes().size();
long sumOfActiveDocuments = 0;
int otherGroups = 0;
for (Group g : orderedGroups) {
if (g.id() != groupId) {
sumOfActiveDocuments += g.getActiveDocuments();
otherGroups++;
}
}
long activeDocuments = 0;
for (Node n : nodes) {
activeDocuments += n.getActiveDocuments();
}
long averageDocumentsInOtherGroups = sumOfActiveDocuments / otherGroups;
return isGroupCoverageSufficient(nodes.size(), nodesInGroup, activeDocuments, averageDocumentsInOtherGroups);
}
private void trackGroupCoverageChanges(int index, Group group, boolean fullCoverage, long averageDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
int requiredNodes = groupSize() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info(() -> String.format("Group %d is now good again (%d/%d active docs, coverage %d/%d)",
index, group.getActiveDocuments(), averageDocuments, group.workingNodes(), groupSize()));
} else {
StringBuilder missing = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE) {
missing.append('\n').append(node.toString());
}
}
log.warning(() -> String.format("Coverage of group %d is only %d/%d (requires %d) (%d/%d active docs) Failed nodes are:%s",
index, group.workingNodes(), groupSize(), requiredNodes, group.getActiveDocuments(), averageDocuments, missing.toString()));
}
}
}
} |
Now I have added query side control. dispatch.top-k-probability. Can '-' be used in query without requiring any escaping ? I used that to have it identical with services.xml. | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return ((hitEstimator == null) || (numPartitions <= 1))
? wantedHits
: hitEstimator.estimateK(wantedHits, numPartitions);
} | return ((hitEstimator == null) || (numPartitions <= 1)) | public int estimateHitsToFetch(int wantedHits, int numPartitions) {
return hitEstimator.estimateK(wantedHits, numPartitions);
} | class TopKEstimator {
private final TDistribution studentT;
private final double p;
TopKEstimator(double freedom, double wantedprobability) {
this.studentT = new TDistribution(null, freedom);
p = wantedprobability;
}
double estimateExactK(double k, double n) {
double variance = k * 1/n * (1 - 1/n);
double p_inverse = 1 - (1 - p)/n;
return k/n + studentT.inverseCumulativeProbability(p_inverse) * Math.sqrt(variance);
}
int estimateK(double k, double n) {
return (int)Math.ceil(estimateExactK(k, n));
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final DispatchConfig dispatchConfig;
private final int size;
private final String clusterId;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
private final ImmutableList<Group> orderedGroups;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final TopKEstimator hitEstimator;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Optional<Node> localCorpusDispatchTarget;
public SearchCluster(String clusterId, DispatchConfig dispatchConfig, int containerClusterSize,
VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.dispatchConfig = dispatchConfig;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
List<Node> nodes = toNodes(dispatchConfig);
this.size = nodes.size();
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
this.groups = groupsBuilder.build();
LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>();
nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group())));
this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build();
ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>();
for (Node node : nodes)
nodesByHostBuilder.put(node.hostname(), node);
this.nodesByHost = nodesByHostBuilder.build();
hitEstimator = new TopKEstimator(30.0, dispatchConfig.topKProbability());
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(),
size,
containerClusterSize,
nodesByHost,
groups);
}
/* Testing only */
public SearchCluster(String clusterId, DispatchConfig dispatchConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, dispatchConfig, 1, vipStatus, pingFactory);
}
public void addMonitoring(ClusterMonitor clusterMonitor) {
for (var group : orderedGroups) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Optional<Node> findLocalCorpusDispatchTarget(String selfHostname,
int searchClusterSize,
int containerClusterSize,
ImmutableMultimap<String, Node> nodesByHost,
ImmutableMap<Integer, Group> groups) {
ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname);
if (localSearchNodes.size() != 1) return Optional.empty();
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return Optional.empty();
if (containerClusterSize < searchClusterSize) return Optional.empty();
return Optional.of(localSearchNode);
}
private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) {
ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>();
for (DispatchConfig.Node node : dispatchConfig.node())
nodesBuilder.add(new Node(node.key(), node.host(), node.group()));
return nodesBuilder.build();
}
public DispatchConfig dispatchConfig() {
return dispatchConfig;
}
/** Returns the number of nodes in this cluster (across all groups) */
public int size() { return size; }
/** Returns the groups of this cluster as an immutable map indexed by group id */
public ImmutableMap<Integer, Group> groups() { return groups; }
/** Returns the groups of this cluster as an immutable list in introduction order */
public ImmutableList<Group> orderedGroups() { return orderedGroups; }
/** Returns the n'th (zero-indexed) group in the cluster if possible */
public Optional<Group> group(int n) {
if (orderedGroups.size() > n) {
return Optional.of(orderedGroups.get(n));
} else {
return Optional.empty();
}
}
/** Returns the number of nodes per group - size()/groups.size() */
public int groupSize() {
if (groups.size() == 0) return size();
return size() / groups.size();
}
public int groupsWithSufficientCoverage() {
int covered = 0;
for (Group g : orderedGroups) {
if (g.hasSufficientCoverage()) {
covered++;
}
}
return covered;
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget.isEmpty()) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.get().group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.get().isWorking() == Boolean.FALSE) return Optional.empty();
return localCorpusDispatchTarget;
}
/** Records the node's new working state and recomputes this cluster's VIP rotation status. */
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
/** Records a group's coverage state and updates VIP status, but only when the state actually changed. */
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
    boolean unchanged = group.hasSufficientCoverage() == sufficientCoverage;
    if (unchanged) return;
    group.setHasSufficientCoverage(sufficientCoverage);
    updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
// Recomputes VIP rotation membership after a single node changed working state.
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget.isEmpty()) { // no local target: rotation follows the whole cluster
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) { // rotation follows this specific node
// NOTE(review): when size() == 1 a failing node does NOT remove us from rotation -
// presumably deliberate (nothing better to route to); confirm
if (nodeIsWorking || size() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
/**
 * Updates VIP rotation after a group coverage change. Only relevant when we dispatch
 * to a local corpus node in that group; otherwise rotation is driven by node changes.
 */
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
    // usesLocalCorpusIn is false whenever localCorpusDispatchTarget is empty,
    // so the empty case needs no separate branch
    if (usesLocalCorpusIn(group))
        setInRotationOnlyIf(sufficientCoverage);
}
/** Adds this cluster to the VIP rotation when inRotation is true, removes it otherwise. */
private void setInRotationOnlyIf(boolean inRotation) {
    if (inRotation) {
        vipStatus.addToRotation(clusterId);
    } else {
        vipStatus.removeFromRotation(clusterId);
    }
}
public int estimateHitsToFetch(int wantedHits, int numPartitions, double topKProbability) {
return hitEstimator.estimateK(wantedHits, numPartitions, topKProbability);
}
/** Returns whether a working/failed status is known for every node (isWorking() is a nullable Boolean). */
public boolean hasInformationAboutAllNodes() {
    return nodesByHost.values().stream().noneMatch(node -> node.isWorking() == null);
}
/** Returns whether any node is not known to be failed (unknown status counts as working). */
private boolean hasWorkingNodes() {
    return nodesByHost.values().stream().anyMatch(node -> ! Boolean.FALSE.equals(node.isWorking()));
}
/** Returns whether the given node is the local corpus dispatch target. */
private boolean usesLocalCorpusIn(Node node) {
    return localCorpusDispatchTarget.filter(target -> target.equals(node)).isPresent();
}
/** Returns whether the local corpus dispatch target belongs to the given group. */
private boolean usesLocalCorpusIn(Group group) {
    return localCorpusDispatchTarget.map(target -> target.group() == group.id()).orElse(false);
}
/** Routes a ping response to the cluster monitor, updating the node's document count on success. */
private static class PongCallback implements PongHandler {

    private final ClusterMonitor<Node> clusterMonitor;
    private final Node node;

    PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
        this.node = node;
        this.clusterMonitor = clusterMonitor;
    }

    @Override
    public void handle(Pong pong) {
        if (pong.badResponse()) {
            clusterMonitor.failed(node, pong.error().get());
            return;
        }
        pong.activeDocuments().ifPresent(node::setActiveDocuments);
        clusterMonitor.responded(node);
    }

}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
// Ping-round bookkeeping for the single-group case: coverage is always "sufficient"
// relative to other groups (there are none), but node-level coverage is still tracked for logging.
private void pingIterationCompletedSingleGroup() {
Group group = groups.values().iterator().next();
group.aggregateActiveDocuments();
// With a single group there is nothing to compare document counts against
updateSufficientCoverage(group, true);
// Passing the group's own count as the "average of other groups" makes the
// document-ratio test trivially pass; only the working-node test can fail
boolean fullCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), group.getActiveDocuments(),
group.getActiveDocuments());
trackGroupCoverageChanges(0, group, fullCoverage, group.getActiveDocuments());
}
// Ping-round bookkeeping for the multi-group case: each group's active document count
// is compared against the average of the other groups to decide coverage.
private void pingIterationCompletedMultipleGroups() {
    int numGroups = orderedGroups.size();
    long[] activeDocumentsInGroup = new long[numGroups];
    long sumOfActiveDocuments = 0;
    // First pass: aggregate per-group document counts and their sum
    for (int i = 0; i < numGroups; i++) {
        Group group = orderedGroups.get(i);
        group.aggregateActiveDocuments();
        activeDocumentsInGroup[i] = group.getActiveDocuments();
        sumOfActiveDocuments += activeDocumentsInGroup[i];
    }
    // Second pass: judge each group against the average of the remaining groups.
    // (The previous 'anyGroupsSufficientCoverage' accumulator was never read and has been removed.)
    for (int i = 0; i < numGroups; i++) {
        Group group = orderedGroups.get(i);
        long activeDocuments = activeDocumentsInGroup[i];
        long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1);
        boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(), group.nodes().size(), activeDocuments, averageDocumentsInOtherGroups);
        updateSufficientCoverage(group, sufficientCoverage);
        trackGroupCoverageChanges(i, group, sufficientCoverage, averageDocumentsInOtherGroups);
    }
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
    // The single-group case needs no cross-group comparison and is handled separately
    if (orderedGroups.size() == 1)
        pingIterationCompletedSingleGroup();
    else
        pingIterationCompletedMultipleGroups();
}
/**
 * A group has sufficient coverage when its active document count is close enough to the
 * average of the other groups AND it has enough working nodes.
 */
private boolean isGroupCoverageSufficient(int workingNodes, int nodesInGroup, long activeDocuments, long averageDocumentsInOtherGroups) {
    if (averageDocumentsInOtherGroups > 0) {
        double coveragePercent = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups;
        if (coveragePercent < dispatchConfig.minActivedocsPercentage()) return false;
    }
    return isGroupNodeCoverageSufficient(workingNodes, nodesInGroup);
}
/** Returns whether the number of nodes down in the group is within the configured allowance. */
private boolean isGroupNodeCoverageSufficient(int workingNodes, int nodesInGroup) {
    // Allowance is an absolute node count plus a percentage-based slack of the group size
    double coverageSlack = ((double) nodesInGroup * (100.0 - dispatchConfig.minGroupCoverage())) / 100.0;
    int nodesAllowedDown = dispatchConfig.maxNodesDownPerGroup() + (int) coverageSlack;
    return nodesInGroup - workingNodes <= nodesAllowedDown;
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
/**
 * Calculate whether a subset of nodes in a group has enough coverage.
 *
 * @param knownGroupId the id of the group the nodes belong to, if known
 * @param nodes the subset of nodes to evaluate
 * @return true if the subset provides sufficient coverage
 */
public boolean isPartialGroupCoverageSufficient(OptionalInt knownGroupId, List<Node> nodes) {
    // Single-group clusters only require enough nodes to be present
    if (orderedGroups.size() == 1)
        return nodes.size() >= groupSize() - dispatchConfig.maxNodesDownPerGroup();

    // With multiple groups we must know which group to compare against the others
    if (knownGroupId.isEmpty()) return false;
    int groupId = knownGroupId.getAsInt();
    Group group = groups.get(groupId);
    if (group == null) return false;

    // Sum up documents in all other groups to compute their average
    long documentsInOtherGroups = 0;
    int otherGroups = 0;
    for (Group other : orderedGroups) {
        if (other.id() == groupId) continue;
        documentsInOtherGroups += other.getActiveDocuments();
        otherGroups++;
    }

    long activeDocuments = 0;
    for (Node candidate : nodes)
        activeDocuments += candidate.getActiveDocuments();

    return isGroupCoverageSufficient(nodes.size(), group.nodes().size(),
                                     activeDocuments, documentsInOtherGroups / otherGroups);
}
// Logs group coverage transitions: every state change is logged, and while a group stays
// degraded a warning is repeated at most every 30 seconds (rate limited via nextLogTime).
private void trackGroupCoverageChanges(int index, Group group, boolean fullCoverage, long averageDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
int requiredNodes = groupSize() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info(() -> String.format("Group %d is now good again (%d/%d active docs, coverage %d/%d)",
index, group.getActiveDocuments(), averageDocuments, group.workingNodes(), groupSize()));
} else {
// Collect the nodes not known to be working for the warning message
StringBuilder missing = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE) {
missing.append('\n').append(node.toString());
}
}
log.warning(() -> String.format("Coverage of group %d is only %d/%d (requires %d) (%d/%d active docs) Failed nodes are:%s",
index, group.workingNodes(), groupSize(), requiredNodes, group.getActiveDocuments(), averageDocuments, missing.toString()));
}
}
}
} |
Should probably be `Level.FINEST` to match `log.finest()` call below | List<Document> getNextDocsForFeeding(long maxWaitUnits, TimeUnit timeUnit) {
// Builds the next chunk to send: blocks up to maxWaitUnits for the first document,
// then drains more without blocking while the chunk-size and in-flight limits permit.
List<Document> docsForSendChunk = new ArrayList<>();
int chunkSizeBytes = 0;
try {
drainFirstDocumentsInQueueIfOld();
Document doc = documentQueue.poll(maxWaitUnits, timeUnit);
if (doc != null) {
docsForSendChunk.add(doc);
chunkSizeBytes = doc.size();
}
} catch (InterruptedException ie) {
// NOTE(review): interrupt is treated as a break signal and the flag is not restored -
// confirm callers do not depend on Thread.interrupted() state
log.fine("Got break signal while waiting for new documents to feed");
return docsForSendChunk;
}
int pendingSize = 1 + resultQueue.getPendingSize();
while (chunkSizeBytes < maxChunkSizeBytes && pendingSize < maxInFlightRequests) {
drainFirstDocumentsInQueueIfOld();
Document document = documentQueue.poll();
if (document == null) break;
docsForSendChunk.add(document);
chunkSizeBytes += document.size();
pendingSize++;
}
// Fixed: guard level now matches the FINEST level actually used by log.finest() below
if (log.isLoggable(Level.FINEST))
log.finest("Chunk has " + docsForSendChunk.size() + " docs with a size " + chunkSizeBytes + " bytes");
docsReceivedCounter.addAndGet(docsForSendChunk.size());
return docsForSendChunk;
} | if (log.isLoggable(Level.FINE)) | List<Document> getNextDocsForFeeding(long maxWaitUnits, TimeUnit timeUnit) {
// Chunk assembly: block up to maxWaitUnits for the first document, then drain without blocking.
List<Document> docsForSendChunk = new ArrayList<>();
int chunkSizeBytes = 0;
try {
drainFirstDocumentsInQueueIfOld();
Document doc = documentQueue.poll(maxWaitUnits, timeUnit);
if (doc != null) {
docsForSendChunk.add(doc);
chunkSizeBytes = doc.size();
}
} catch (InterruptedException ie) {
log.fine("Got break signal while waiting for new documents to feed");
return docsForSendChunk;
}
int pendingSize = 1 + resultQueue.getPendingSize();
// NOTE(review): randomize(...) presumably jitters the limits per call to spread load -
// confirm against its definition (not visible here)
int thisMaxChunkSizeBytes = randomize(maxChunkSizeBytes);
int thisMaxInFlightRequests = randomize(maxInFlightRequests);
while (chunkSizeBytes < thisMaxChunkSizeBytes && pendingSize < thisMaxInFlightRequests) {
drainFirstDocumentsInQueueIfOld();
Document document = documentQueue.poll();
if (document == null) break;
docsForSendChunk.add(document);
chunkSizeBytes += document.size();
pendingSize++;
}
if (log.isLoggable(Level.FINEST))
log.finest("Chunk has " + docsForSendChunk.size() + " docs with a size " + chunkSizeBytes + " bytes");
docsReceivedCounter.addAndGet(docsForSendChunk.size());
return docsForSendChunk;
} | class ConnectionStats {
// Immutable snapshot of per-connection feed statistics (all counters are cumulative).
public final int wrongSessionDetectedCounter;
public final int wrongVersionDetectedCounter;
public final int problemStatusCodeFromServerCounter;
public final int executeProblemsCounter;
public final int docsReceivedCounter;
public final int statusReceivedCounter;
public final int pendingDocumentStatusCount;
public final int successfullHandshakes;
public final int lastGatewayProcessTimeMillis;
ConnectionStats(int wrongSessionDetectedCounter,
int wrongVersionDetectedCounter,
int problemStatusCodeFromServerCounter,
int executeProblemsCounter,
int docsReceivedCounter,
int statusReceivedCounter,
int pendingDocumentStatusCount,
int successfullHandshakes,
int lastGatewayProcessTimeMillis) {
this.wrongSessionDetectedCounter = wrongSessionDetectedCounter;
this.wrongVersionDetectedCounter = wrongVersionDetectedCounter;
this.problemStatusCodeFromServerCounter = problemStatusCodeFromServerCounter;
this.executeProblemsCounter = executeProblemsCounter;
this.docsReceivedCounter = docsReceivedCounter;
this.statusReceivedCounter = statusReceivedCounter;
this.pendingDocumentStatusCount = pendingDocumentStatusCount;
this.successfullHandshakes = successfullHandshakes;
this.lastGatewayProcessTimeMillis = lastGatewayProcessTimeMillis;
}
} | class ConnectionStats {
public final int wrongSessionDetectedCounter;
public final int wrongVersionDetectedCounter;
public final int problemStatusCodeFromServerCounter;
public final int executeProblemsCounter;
public final int docsReceivedCounter;
public final int statusReceivedCounter;
public final int pendingDocumentStatusCount;
public final int successfullHandshakes;
public final int lastGatewayProcessTimeMillis;
ConnectionStats(int wrongSessionDetectedCounter,
int wrongVersionDetectedCounter,
int problemStatusCodeFromServerCounter,
int executeProblemsCounter,
int docsReceivedCounter,
int statusReceivedCounter,
int pendingDocumentStatusCount,
int successfullHandshakes,
int lastGatewayProcessTimeMillis) {
this.wrongSessionDetectedCounter = wrongSessionDetectedCounter;
this.wrongVersionDetectedCounter = wrongVersionDetectedCounter;
this.problemStatusCodeFromServerCounter = problemStatusCodeFromServerCounter;
this.executeProblemsCounter = executeProblemsCounter;
this.docsReceivedCounter = docsReceivedCounter;
this.statusReceivedCounter = statusReceivedCounter;
this.pendingDocumentStatusCount = pendingDocumentStatusCount;
this.successfullHandshakes = successfullHandshakes;
this.lastGatewayProcessTimeMillis = lastGatewayProcessTimeMillis;
}
} |
Ok, the registry is owned by the handler. All good, then. | protected void destroy() {
laterExecutor.shutdown();
docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct());
} | docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct()); | protected void destroy() {
laterExecutor.shutdown();
docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct());
} | class DocumentProcessingHandler extends AbstractRequestHandler {
private static Logger log = Logger.getLogger(DocumentProcessingHandler.class.getName());
private final ComponentRegistry<DocprocService> docprocServiceRegistry;
private final ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry;
private final ChainRegistry<DocumentProcessor> chainRegistry = new ChainRegistry<>();
private final ScheduledThreadPoolExecutor laterExecutor =
new ScheduledThreadPoolExecutor(2, new DaemonThreadFactory("docproc-later-"));
private ContainerDocumentConfig containerDocConfig;
private final DocumentTypeManager documentTypeManager;
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
int numThreads,
DocumentTypeManager documentTypeManager,
ChainsModel chainsModel, SchemaMap schemaMap, Statistics statistics,
Metric metric,
ContainerDocumentConfig containerDocConfig) {
this.docprocServiceRegistry = docprocServiceRegistry;
this.docFactoryRegistry = docFactoryRegistry;
this.containerDocConfig = containerDocConfig;
this.documentTypeManager = documentTypeManager;
DocprocService.schemaMap = schemaMap;
laterExecutor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
laterExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
if (chainsModel != null) {
prepareChainRegistry(chainRegistry, chainsModel, documentProcessorComponentRegistry);
for (Chain<DocumentProcessor> chain : chainRegistry.allComponents()) {
log.config("Setting up call stack for chain " + chain.getId());
DocprocService service = new DocprocService(chain.getId(), convertToCallStack(chain, statistics, metric), documentTypeManager, computeNumThreads(numThreads));
service.setInService(true);
docprocServiceRegistry.register(service.getId(), service);
}
}
}
/** Returns maxThreads when positive, otherwise one thread per available processor. */
private static int computeNumThreads(int maxThreads) {
    if (maxThreads > 0) return maxThreads;
    return Runtime.getRuntime().availableProcessors();
}
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
DocumentProcessingHandlerParameters params) {
this(docprocServiceRegistry, documentProcessorComponentRegistry, docFactoryRegistry,
params.getMaxNumThreads(),
params.getDocumentTypeManager(), params.getChainsModel(), params.getSchemaMap(),
params.getStatisticsManager(),
params.getMetric(),
params.getContainerDocConfig());
}
@Inject
public DocumentProcessingHandler(ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
ChainsConfig chainsConfig,
SchemamappingConfig mappingConfig,
DocumentmanagerConfig docManConfig,
DocprocConfig docprocConfig,
ContainerMbusConfig containerMbusConfig,
ContainerDocumentConfig containerDocConfig,
Statistics manager,
Metric metric) {
this(new ComponentRegistry<>(),
documentProcessorComponentRegistry, docFactoryRegistry, new DocumentProcessingHandlerParameters().setMaxNumThreads
(docprocConfig.numthreads())
.setDocumentTypeManager(new DocumentTypeManager(docManConfig))
.setChainsModel(buildFromConfig(chainsConfig)).setSchemaMap(configureMapping(mappingConfig))
.setStatisticsManager(manager)
.setMetric(metric)
.setContainerDocumentConfig(containerDocConfig));
}
@Override
public ComponentRegistry<DocprocService> getDocprocServiceRegistry() {
return docprocServiceRegistry;
}
public ChainRegistry<DocumentProcessor> getChains() {
return chainRegistry;
}
/** Builds a SchemaMap configured from the given mapping config. */
private static SchemaMap configureMapping(SchemamappingConfig mappingConfig) {
    SchemaMap schemaMap = new SchemaMap();
    schemaMap.configure(mappingConfig);
    return schemaMap;
}
/** Builds a CallStack for the chain, applying each processor's schema field mapping before adding it. */
private static CallStack convertToCallStack(Chain<DocumentProcessor> chain, Statistics statistics, Metric metric) {
    String chainName = chain.getId().stringValue();
    CallStack callStack = new CallStack(chainName, statistics, metric);
    for (DocumentProcessor processor : chain.components()) {
        processor.getFieldMap().putAll(DocprocService.schemaMap.chainMap(chainName, processor.getId().stringValue()));
        callStack.addLast(processor);
    }
    return callStack;
}
// Dispatches an incoming message-bus request to the docproc service named by the request.
// Always returns null: responses are delivered asynchronously via the ResponseHandler.
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
RequestContext requestContext;
if (request instanceof MbusRequest) {
requestContext = new MbusRequestContext((MbusRequest) request, handler, docprocServiceRegistry, docFactoryRegistry, containerDocConfig);
} else {
throw new IllegalArgumentException("Request type not supported: " + request);
}
// Messages that cannot be processed are passed through untouched
if (!requestContext.isProcessable()) {
requestContext.skip();
return null;
}
String serviceName = requestContext.getServiceName();
DocprocService service = docprocServiceRegistry.getComponent(serviceName);
if (service == null) {
log.log(Level.SEVERE, "DocprocService for session '" + serviceName +
"' not found, returning request '" + requestContext + "'.");
requestContext.processingFailed(RequestContext.ErrorCode.ERROR_PROCESSING_FAILURE,
"DocprocService " + serviceName + " not found.");
return null;
} else if (service.getExecutor().getCallStack().size() == 0) {
// An empty chain is a no-op: skip rather than fail
requestContext.skip();
return null;
}
// Processing happens asynchronously on the service's own thread pool
DocumentProcessingTask task = new DocumentProcessingTask(requestContext, this, service, service.getThreadPoolExecutor());
task.submit();
return null;
}
/** Schedules the given task for resubmission after the given delay in milliseconds. */
void submit(DocumentProcessingTask task, long delay) {
    laterExecutor.schedule(new LaterTimerTask(task, delay), delay, TimeUnit.MILLISECONDS);
}
/** Resubmits a processing task after a Progress.LATER delay has elapsed. */
private class LaterTimerTask extends TimerTask {

    private final DocumentProcessingTask processingTask;
    private final long delay;

    private LaterTimerTask(DocumentProcessingTask processingTask, long delay) {
        this.processingTask = processingTask;
        this.delay = delay;
        log.log(Level.FINE, "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask);
    }

    @Override
    public void run() {
        log.log(Level.FINE, "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask);
        processingTask.submit();
    }

}
public DocumentTypeManager getDocumentTypeManager() {
return documentTypeManager;
}
} | class DocumentProcessingHandler extends AbstractRequestHandler {
private static Logger log = Logger.getLogger(DocumentProcessingHandler.class.getName());
private final ComponentRegistry<DocprocService> docprocServiceRegistry;
private final ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry;
private final ChainRegistry<DocumentProcessor> chainRegistry = new ChainRegistry<>();
private final ScheduledThreadPoolExecutor laterExecutor =
new ScheduledThreadPoolExecutor(2, new DaemonThreadFactory("docproc-later-"));
private ContainerDocumentConfig containerDocConfig;
private final DocumentTypeManager documentTypeManager;
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
int numThreads,
DocumentTypeManager documentTypeManager,
ChainsModel chainsModel, SchemaMap schemaMap, Statistics statistics,
Metric metric,
ContainerDocumentConfig containerDocConfig) {
this.docprocServiceRegistry = docprocServiceRegistry;
this.docFactoryRegistry = docFactoryRegistry;
this.containerDocConfig = containerDocConfig;
this.documentTypeManager = documentTypeManager;
DocprocService.schemaMap = schemaMap;
laterExecutor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
laterExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
if (chainsModel != null) {
prepareChainRegistry(chainRegistry, chainsModel, documentProcessorComponentRegistry);
for (Chain<DocumentProcessor> chain : chainRegistry.allComponents()) {
log.config("Setting up call stack for chain " + chain.getId());
DocprocService service = new DocprocService(chain.getId(), convertToCallStack(chain, statistics, metric), documentTypeManager, computeNumThreads(numThreads));
service.setInService(true);
docprocServiceRegistry.register(service.getId(), service);
}
}
}
private static int computeNumThreads(int maxThreads) {
return (maxThreads > 0) ? maxThreads : Runtime.getRuntime().availableProcessors();
}
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
DocumentProcessingHandlerParameters params) {
this(docprocServiceRegistry, documentProcessorComponentRegistry, docFactoryRegistry,
params.getMaxNumThreads(),
params.getDocumentTypeManager(), params.getChainsModel(), params.getSchemaMap(),
params.getStatisticsManager(),
params.getMetric(),
params.getContainerDocConfig());
}
@Inject
public DocumentProcessingHandler(ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
ChainsConfig chainsConfig,
SchemamappingConfig mappingConfig,
DocumentmanagerConfig docManConfig,
DocprocConfig docprocConfig,
ContainerMbusConfig containerMbusConfig,
ContainerDocumentConfig containerDocConfig,
Statistics manager,
Metric metric) {
this(new ComponentRegistry<>(),
documentProcessorComponentRegistry, docFactoryRegistry, new DocumentProcessingHandlerParameters().setMaxNumThreads
(docprocConfig.numthreads())
.setDocumentTypeManager(new DocumentTypeManager(docManConfig))
.setChainsModel(buildFromConfig(chainsConfig)).setSchemaMap(configureMapping(mappingConfig))
.setStatisticsManager(manager)
.setMetric(metric)
.setContainerDocumentConfig(containerDocConfig));
}
@Override
public ComponentRegistry<DocprocService> getDocprocServiceRegistry() {
return docprocServiceRegistry;
}
public ChainRegistry<DocumentProcessor> getChains() {
return chainRegistry;
}
private static SchemaMap configureMapping(SchemamappingConfig mappingConfig) {
SchemaMap map = new SchemaMap();
map.configure(mappingConfig);
return map;
}
private static CallStack convertToCallStack(Chain<DocumentProcessor> chain, Statistics statistics, Metric metric) {
CallStack stack = new CallStack(chain.getId().stringValue(), statistics, metric);
for (DocumentProcessor processor : chain.components()) {
processor.getFieldMap().putAll(DocprocService.schemaMap.chainMap(chain.getId().stringValue(), processor.getId().stringValue()));
stack.addLast(processor);
}
return stack;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
RequestContext requestContext;
if (request instanceof MbusRequest) {
requestContext = new MbusRequestContext((MbusRequest) request, handler, docprocServiceRegistry, docFactoryRegistry, containerDocConfig);
} else {
throw new IllegalArgumentException("Request type not supported: " + request);
}
if (!requestContext.isProcessable()) {
requestContext.skip();
return null;
}
String serviceName = requestContext.getServiceName();
DocprocService service = docprocServiceRegistry.getComponent(serviceName);
if (service == null) {
log.log(Level.SEVERE, "DocprocService for session '" + serviceName +
"' not found, returning request '" + requestContext + "'.");
requestContext.processingFailed(RequestContext.ErrorCode.ERROR_PROCESSING_FAILURE,
"DocprocService " + serviceName + " not found.");
return null;
} else if (service.getExecutor().getCallStack().size() == 0) {
requestContext.skip();
return null;
}
DocumentProcessingTask task = new DocumentProcessingTask(requestContext, this, service, service.getThreadPoolExecutor());
task.submit();
return null;
}
void submit(DocumentProcessingTask task, long delay) {
LaterTimerTask timerTask = new LaterTimerTask(task, delay);
laterExecutor.schedule(timerTask, delay, TimeUnit.MILLISECONDS);
}
private class LaterTimerTask extends TimerTask {
private DocumentProcessingTask processingTask;
private long delay;
private LaterTimerTask(DocumentProcessingTask processingTask, long delay) {
this.delay = delay;
log.log(Level.FINE, "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask);
this.processingTask = processingTask;
}
@Override
public void run() {
log.log(Level.FINE, "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask);
processingTask.submit();
}
}
public DocumentTypeManager getDocumentTypeManager() {
return documentTypeManager;
}
} |
Consider extending one of the deprovision tests in `NodeRepositoryTest` to test this behaviour. | public List<Node> removeRecursively(Node node, boolean force) {
// Removes the node (and, for hosts, all its children) under the unallocated-nodes lock.
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type() == NodeType.host) {
// All children must be removable before any deletion happens
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
// NOTE(review): cloud name "aws" is hardcoded here; in AWS the host is deleted
// outright, elsewhere it is kept in the deprovisioned state - consider a named constant
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else {
// Clear IP config so the addresses can be reused by a future host
node = node.with(IP.Config.EMPTY);
move(node, State.deprovisioned, Agent.system, Optional.empty());
}
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | node = node.with(IP.Config.EMPTY); | public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type() == NodeType.host) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else {
node = node.with(IP.Config.EMPTY);
move(node, State.deprovisioned, Agent.system, Optional.empty());
}
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
private final Applications applications;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(this);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage);
this.jobControl = new JobControl(db);
this.applications = new Applications();
for (State state : State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/** Returns this node repo's view of the applications deployed to it */
public Applications applications() { return applications; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a filterable list of all nodes of an application */
public NodeList list(ApplicationId application) {
return NodeList.copyOf(getNodes(application));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports).
 *
 * @param node       the node to compute an ACL for
 * @param candidates all nodes considered as potential ACL members
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    // Trusted nodes are ordered/deduplicated by hostname
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();
    // Port 22 (SSH) is trusted on all node types
    trustedPorts.add(22);
    // A child node trusts its parent (e.g. its Docker host), if it has one
    candidates.parentOf(node).ifPresent(trustedNodes::add);
    // An allocated node trusts all nodes of the same application, plus the
    // networks of any load balancers serving that application
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers(allocation.owner()).asList().stream()
                .map(LoadBalancer::instance)
                .map(LoadBalancerInstance::networks)
                .forEach(trustedNetworks::addAll);
    });
    switch (node.type()) {
        case tenant:
            // Tenant nodes trust config and proxy nodes, and the parents (hosts)
            // of all nodes in their own application
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            node.allocation().ifPresent(allocation ->
                    trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
            if (node.state() == State.ready) {
                // Ready tenant nodes additionally trust all tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust every node, and port 4443
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;
        case proxy:
            // Proxy nodes trust config servers and ports 443/4080/4443
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;
        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            trustedPorts.add(80);
            break;
        default:
            illegal("Don't know how to create ACL for " + node + " of type " + node.type());
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust.
 *
 * @param node     node for which to generate ACLs
 * @param children if true, return ACLs for the children of the given node
 *                 (e.g. containers on a Docker host) instead of the node itself
 * @return an unmodifiable list of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if ( ! children)
        return Collections.singletonList(getNodeAcl(node, candidates));
    List<NodeAcl> acls = new ArrayList<>();
    for (Node child : candidates.childrenOf(node).asList())
        acls.add(getNodeAcl(child, candidates));
    return Collections.unmodifiableList(acls);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/**
 * Creates a new node object, without adding it to the node repo.
 * If no IP address is given, it will be resolved from the hostname.
 *
 * @param ipConfig the IP configuration; if its primary addresses are empty they
 *                 are resolved via the name resolver, which throws on failure
 */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
                       Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
    if (ipConfig.primary().isEmpty()) // no IP addresses given for this node
        ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
    return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    // Validate every node before writing any of them
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        getNode(node.hostname()).ifPresent(existing ->
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing + ", " + existing.history() + "). Node to be added: " +
                        node + ", " + node.history()));
    }
    return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 *
 * @param nodes the nodes to add; must not contain duplicates
 * @param agent the agent performing this operation
 * @return the nodes as written, in the provisioned state
 * @throws IllegalArgumentException if a node is duplicated in the argument list, or a node
 *         with the same hostname already exists in a state other than deprovisioned
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd = new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);
            // Reject duplicates within the argument list itself (same equality the
            // original manual inner loop used)
            if (nodes.subList(0, i).contains(node))
                illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Merge in the history and relevant status of the deprovisioned node
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        // Write the merged nodes as provisioned, then remove the deprovisioned originals
        List<Node> resultingNodes =
                new ArrayList<>(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}
/**
 * Sets a list of nodes ready and returns the nodes in the ready state.
 *
 * @param nodes  the nodes to set ready; each must be in the provisioned or dirty state
 * @param agent  the agent performing this operation
 * @param reason the reason recorded for the state change
 * @throws IllegalArgumentException if any node is not provisioned or dirty
 */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    // Clear retirement/deprovision requests before the node re-enters the pool
                    return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Sets a single node ready by hostname. Returns the node unchanged if it is already ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    return node.state() == State.ready
            ? node
            : setReady(Collections.singletonList(node), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
deactivate(db.getNodes(application, State.reserved, State.active), transaction);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
/**
 * Moves the node with this hostname, and all its children if it is a Docker host,
 * to the dirty state.
 *
 * @return the moved nodes in their new state
 * @throws IllegalArgumentException if the node is not found, or any node to be moved
 *         is not in the provisioned, failed or parked state
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    List<Node> nodesToDirty =
            (nodeToDirty.type().isDockerHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
                    .filter(node -> node.state() != State.dirty)
                    .collect(Collectors.toList());
    // Keep this set in sync with the message below (and with requireRemovable)
    Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> ! legalStates.contains(node.state()))
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, State.active, agent, Optional.of(reason));
}
/** Moves all children of the node with this hostname, then the node itself, to the given state. */
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList())
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}
/**
 * Moves the node with this hostname to the given state, optionally dropping its
 * allocation first.
 *
 * @throws NoSuchNodeException if no node with this hostname exists
 */
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (node.allocation().isPresent() && ! keepAllocation)
        node = node.withoutAllocation();
    return move(node, toState, agent, reason);
}
/**
 * Moves the given node to the given state under the appropriate lock.
 *
 * @throws IllegalArgumentException if moving to active without an allocation, or if
 *         another active node of the same application has the same cluster and index
 */
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        illegal("Could not set " + node + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            // Guard against two active nodes occupying the same cluster slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Tenant containers are removed entirely rather than recycled; requires state dirty
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        return removeRecursively(node, true).get(0);
    }
    if (node.state() == State.ready) return node; // already available, nothing to do
    // Refuse to ready a node whose (parent) host has hard failures
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node: node is unallocated
 * - Non-Docker-container node: iff in state provisioned|failed|parked
 * - Docker-container-node:
 * If only removing the container node: node in state ready
 * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param node            the node to check
 * @param removingAsChild whether the node is being removed because its parent is removed
 * @param force           if true, all checks are skipped
 * @throws IllegalArgumentException if removal is not allowed
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return;
    // An allocated tenant node can never be removed (regardless of flavor)
    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !removingAsChild) {
        // Removing a container directly: it must be ready
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state [ready]");
    }
    else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
        // Removing a container as part of removing its host: more states are acceptable
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
        if ( ! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
    else {
        // Any other node type/flavor
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
        if (! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* @return the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state.
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock Already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform; receives the node and the lock held while it runs
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Partition matching nodes by which lock protects them: unallocated nodes share
    // one lock, allocated nodes are locked per owning application
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // Perform the operation while holding the appropriate lock for each partition
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node, lock));
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node, lock));
        }
    }
    return resultingNodes;
}
/**
 * Returns whether a tenant node can be allocated to the given host: the host must be
 * able to run tenant nodes, must not be retiring or retired, and must be in an
 * acceptable state for this zone's cloud.
 */
public boolean canAllocateTenantNodeTo(Node host) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    if (zone.cloud().value().equals("aws"))
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    return host.state() == State.active;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
private final Applications applications;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 *
 * @param useCuratorClientCache whether the curator database client should cache reads
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
                      DockerImage dockerImage, boolean useCuratorClientCache) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.nameResolver = nameResolver;
    this.osVersions = new OsVersions(this);
    this.infrastructureVersions = new InfrastructureVersions(db);
    this.firmwareChecks = new FirmwareChecks(db, clock);
    this.dockerImages = new DockerImages(db, dockerImage);
    this.jobControl = new JobControl(db);
    this.applications = new Applications();
    // Rewrite every stored node in its current state at startup — NOTE(review):
    // presumably to migrate persisted nodes to the latest serialization format;
    // confirm against CuratorDatabaseClient.writeTo
    for (State state : State.values())
        db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/** Returns this node repo's view of the applications deployed to it */
public Applications applications() { return applications; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a filterable list of all nodes of an application */
public NodeList list(ApplicationId application) {
return NodeList.copyOf(getNodes(application));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
trustedPorts.add(22);
candidates.parentOf(node).ifPresent(trustedNodes::add);
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
node.allocation().ifPresent(allocation ->
trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
if (node.state() == State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
trustedPorts.add(80);
break;
default:
illegal("Don't know how to create ACL for " + node + " of type " + node.type());
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
if (ipConfig.primary().isEmpty())
ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a docker node");
if ( ! node.allocation().isPresent())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
* Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
resultingNodes.addAll(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
db.removeNodes(nodesToRemove);
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != State.provisioned && node.state() != State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = new ArrayList<>();
        for (Node node : nodes)
            removableNodes.add(node.with(node.allocation().get().removable()));
        write(removableNodes, lock);
    }
}
/** Deactivates all reserved and active nodes belonging to the given application, under its lock. */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        deactivate(db.getNodes(application, State.reserved, State.active), transaction);
    }
}
/**
 * Deactivates these nodes in a transaction and returns
 * the nodes in the new state which will hold if the transaction commits.
 * This method does <b>not</b> lock
 *
 * @return the nodes in the inactive state
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    // performOn acquires the appropriate lock (application or unallocated) for each node
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    // NOTE(review): the state restrictions in the javadoc are not checked here —
    // presumably enforced by db.writeTo; confirm.
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
/**
 * Moves the node with the given hostname — and, for Docker hosts, all its children — to the dirty state.
 * Nodes already dirty are skipped; every remaining node must be in provisioned, failed or parked.
 *
 * @return the dirtied nodes in their new state
 * @throws IllegalArgumentException if the node is not found or any node is in an illegal state
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // For Docker hosts include all children; already-dirty nodes are left untouched
    List<Node> nodesToDirty =
            (nodeToDirty.type().isDockerHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
                    .filter(node -> node.state() != State.dirty)
                    .collect(Collectors.toList());
    // Validate all states up front so we either dirty everything or nothing
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> node.state() != State.provisioned)
            .filter(node -> node.state() != State.failed)
            .filter(node -> node.state() != State.parked)
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    // keepAllocation = true: the allocation is kept so the failed node can be inspected/reactivated
    return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    // Children are failed before the host itself (see moveRecursively)
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state.
 *
 * @param keepAllocation whether the node keeps its current allocation when parked
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    // Children are parked before the host itself (see moveRecursively)
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    // keepAllocation = true: the existing allocation is required to go back to active
    return move(hostname, true, State.active, agent, Optional.of(reason));
}
/** Moves all children of the given host to the given state, then the host itself, and returns all moved nodes. */
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList())
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}
/** Resolves the hostname to a node, optionally strips its allocation, and moves it to the given state. */
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (node.allocation().isPresent() && ! keepAllocation)
        node = node.withoutAllocation();
    return move(node, toState, agent, reason);
}
/** Moves this node to the given state after validating the transition, under the appropriate lock. */
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        illegal("Could not set " + node + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            // Guard against two active nodes claiming the same cluster slot (cluster + index)
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        // NOTE(review): 'node' was read before the lock was taken, so this write may race with
        // concurrent changes — confirm callers re-read under lock where that matters.
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    // Tenant Docker containers are ephemeral: remove them entirely instead of readying
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        return removeRecursively(node, true).get(0);
    }
    if (node.state() == State.ready) return node; // already ready: no-op
    // Refuse to ready a node whose (parent) host has hard failures
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 * @throws NotFoundException if no node with the given hostname exists
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    return removeRecursively(node, false); // force = false: normal state validation applies
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node: node is unallocated
 * - Non-Docker-container node: iff in state provisioned|failed|parked
 * - Docker-container-node:
 *   If only removing the container node: node in state ready
 *   If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param force when true, skip all validation
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return;
    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");
    boolean isDockerContainer = node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER;
    if (isDockerContainer && ! removingAsChild) {
        // Removing a container directly: only allowed from the ready state
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state [ready]");
        return;
    }
    // Containers removed as part of their parent allow a wider set of states than bare-metal nodes
    Set<State> legalStates = isDockerContainer
            ? EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready)
            : EnumSet.of(State.provisioned, State.failed, State.parked);
    if ( ! legalStates.contains(node.state()))
        illegal(node + " can not be removed as it is not in the states " + legalStates);
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 *
 * @return the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    // Only active nodes are considered; bumping the wanted restart generation signals the restart
    return performOn(StateFilter.from(State.active, filter),
            (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                    lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    // Bumping the wanted reboot generation signals the reboot; applies to nodes in any state
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @param version the wanted OS version, or empty to clear the target
 * @return the nodes in their new state.
 */
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
    return performOn(filter, (node, lock) ->
            write(node.with(node.status().withOsVersion(node.status().osVersion().withWanted(version))), lock));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param node the node to write
 * @param lock Already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; unused at runtime, it only documents and enforces the
 *             caller's locking obligation in the signature
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Partition matching nodes by the lock that guards them: unallocated nodes share one lock,
    // allocated nodes are guarded by their owning application's lock.
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // NOTE(review): nodes are read before the locks are taken, so a node may have changed by the
    // time the action runs under the lock — confirm the supplied actions tolerate this.
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node, lock));
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node, lock));
        }
    }
    return resultingNodes;
}
/**
 * Returns whether a tenant node can currently be allocated on the given host:
 * the host must be able to run tenant nodes, must not be (wanting to be) retired,
 * and must be in an acceptable state — active only, except in the "aws" cloud
 * where ready and provisioned hosts are also accepted.
 */
public boolean canAllocateTenantNodeTo(Node host) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    if (zone.cloud().value().equals("aws"))
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    return host.state() == State.active;
}
/** Returns the time keeper (clock) used by this node repository */
public Clock clock() { return clock; }
/** Returns the zone this node repository operates in */
public Zone zone() { return zone; }
/** Creates a lock which provides exclusive rights to making changes to the given application's nodes */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Creates a lock with a timeout which provides exclusive rights to making changes to the given application's nodes */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Creates a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node: the owner application's lock if allocated, the unallocated lock otherwise */
public Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/** Shared error-reporting helper: throws IllegalArgumentException with the given message. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
} |
I'd recommend using `UnixPath` in the test methods to easily create the parent directories and/or files you need; the before method should only create directories that are guaranteed to always exist, otherwise the tests might not catch that a certain directory is not created as expected. | public void setup() throws IOException {
Files.createDirectories(donePath.resolve("container-123"));
Files.createDirectories(crashPathInContainer);
} | Files.createDirectories(donePath.resolve("container-123")); | public void setup() throws IOException {
Files.createDirectories(crashPathInContainer);
} | class CoredumpHandlerTest {
private final FileSystem fileSystem = TestFileSystem.create();
private final Path donePath = fileSystem.getPath("/home/docker/dumps");
private final NodeAgentContext context = new NodeAgentContextImpl.Builder("container-123.domain.tld")
.fileSystem(fileSystem).build();
private final Path crashPathInContainer = fileSystem.getPath("/var/crash");
private final Path doneCoredumpsPath = fileSystem.getPath("/home/docker/dumps");
private final TestTerminal terminal = new TestTerminal();
private final CoreCollector coreCollector = mock(CoreCollector.class);
private final CoredumpReporter coredumpReporter = mock(CoredumpReporter.class);
private final Metrics metrics = new Metrics();
@SuppressWarnings("unchecked")
private final Supplier<String> coredumpIdSupplier = mock(Supplier.class);
private final CoredumpHandler coredumpHandler = new CoredumpHandler(terminal, coreCollector, coredumpReporter,
crashPathInContainer, doneCoredumpsPath, "users", metrics, coredumpIdSupplier);
@Test
public void coredump_enqueue_test() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve(".bash.core.431"), Duration.ZERO);
assertFolderContents(crashPathOnHost, ".bash.core.431");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.empty(), enqueuedPath);
Files.move(crashPathOnHost.resolve(".bash.core.431"), crashPathOnHost.resolve("bash.core.431"));
createFileAged(crashPathOnHost.resolve("vespa-proton.core.119"), Duration.ofMinutes(10));
createFileAged(crashPathOnHost.resolve("vespa-slobrok.core.673"), Duration.ofMinutes(5));
when(coredumpIdSupplier.get()).thenReturn("id-123").thenReturn("id-321");
enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
assertFolderContents(crashPathOnHost, "bash.core.431", "vespa-slobrok.core.673");
assertFolderContents(processingDir, "id-123");
assertFolderContents(processingDir.resolve("id-123"), "dump_vespa-proton.core.119");
verify(coredumpIdSupplier, times(1)).get();
enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-321")), enqueuedPath);
assertFolderContents(crashPathOnHost, "bash.core.431");
assertFolderContents(processingDir, "id-123", "id-321");
assertFolderContents(processingDir.resolve("id-321"), "dump_vespa-slobrok.core.673");
verify(coredumpIdSupplier, times(2)).get();
}
@Test
public void enqueue_with_hs_err_files() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve("java.core.69"), Duration.ofSeconds(15));
createFileAged(crashPathOnHost.resolve("hs_err_pid69.log"), Duration.ofSeconds(20));
createFileAged(crashPathOnHost.resolve("java.core.2420"), Duration.ofSeconds(40));
createFileAged(crashPathOnHost.resolve("hs_err_pid2420.log"), Duration.ofSeconds(49));
createFileAged(crashPathOnHost.resolve("hs_err_pid2421.log"), Duration.ofSeconds(50));
when(coredumpIdSupplier.get()).thenReturn("id-123").thenReturn("id-321");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
assertFolderContents(crashPathOnHost, "hs_err_pid69.log", "java.core.69");
assertFolderContents(processingDir, "id-123");
assertFolderContents(processingDir.resolve("id-123"), "hs_err_pid2420.log", "hs_err_pid2421.log", "dump_java.core.2420");
}
@Test
public void coredump_to_process_test() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.empty(), enqueuedPath);
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve("bash.core.431"), Duration.ZERO);
createFileAged(crashPathOnHost.resolve("vespa-proton.core.119"), Duration.ofMinutes(10));
createFileAged(crashPathOnHost.resolve("vespa-slobrok.core.673"), Duration.ofMinutes(5));
when(coredumpIdSupplier.get()).thenReturn("id-123");
enqueuedPath = coredumpHandler.getCoredumpToProcess(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
enqueuedPath = coredumpHandler.getCoredumpToProcess(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
verify(coredumpIdSupplier, times(1)).get();
}
@Test
public void get_metadata_test() throws IOException {
Map<String, Object> metadata = new HashMap<>();
metadata.put("bin_path", "/bin/bash");
metadata.put("backtrace", List.of("call 1", "function 2", "something something"));
Map<String, Object> attributes = Map.of(
"hostname", "host123.yahoo.com",
"vespa_version", "6.48.4",
"kernel_version", "3.10.0-862.9.1.el7.x86_64",
"docker_image", "vespa/ci:6.48.4");
String expectedMetadataStr = "{\"fields\":{" +
"\"hostname\":\"host123.yahoo.com\"," +
"\"kernel_version\":\"3.10.0-862.9.1.el7.x86_64\"," +
"\"backtrace\":[\"call 1\",\"function 2\",\"something something\"]," +
"\"vespa_version\":\"6.48.4\"," +
"\"bin_path\":\"/bin/bash\"," +
"\"docker_image\":\"vespa/ci:6.48.4\"" +
"}}";
Path coredumpDirectoryInContainer = Paths.get("/var/crash/id-123");
Path coredumpDirectory = context.pathOnHostFromPathInNode(coredumpDirectoryInContainer);
Files.createDirectories(coredumpDirectory);
Files.createFile(coredumpDirectory.resolve("dump_core.456"));
when(coreCollector.collect(eq(context), eq(coredumpDirectoryInContainer.resolve("dump_core.456"))))
.thenReturn(metadata);
assertEquals(expectedMetadataStr, coredumpHandler.getMetadata(context, coredumpDirectory, () -> attributes));
verify(coreCollector, times(1)).collect(any(), any());
assertEquals(expectedMetadataStr, coredumpHandler.getMetadata(context, coredumpDirectory, () -> attributes));
verify(coreCollector, times(1)).collect(any(), any());
}
@Test(expected = IllegalStateException.class)
public void cant_get_metadata_if_no_core_file() throws IOException {
coredumpHandler.getMetadata(context, fileSystem.getPath("/fake/path"), Map::of);
}
@Test(expected = IllegalStateException.class)
public void fails_to_get_core_file_if_only_compressed() throws IOException {
Path coredumpDirectory = fileSystem.getPath("/path/to/coredump/proccessing/id-123");
Files.createDirectories(coredumpDirectory);
Files.createFile(coredumpDirectory.resolve("dump_bash.core.431.lz4"));
coredumpHandler.findCoredumpFileInProcessingDirectory(coredumpDirectory);
}
@Test
public void process_single_coredump_test() throws IOException {
Path coredumpDirectory = fileSystem.getPath("/path/to/coredump/proccessing/id-123");
Files.createDirectories(coredumpDirectory);
Files.write(coredumpDirectory.resolve("metadata.json"), "metadata".getBytes());
Files.createFile(coredumpDirectory.resolve("dump_bash.core.431"));
assertFolderContents(coredumpDirectory, "metadata.json", "dump_bash.core.431");
terminal.interceptCommand("/usr/bin/lz4 -f /path/to/coredump/proccessing/id-123/dump_bash.core.431 " +
"/path/to/coredump/proccessing/id-123/dump_bash.core.431.lz4 2>&1",
commandLine -> {
uncheck(() -> Files.createFile(fileSystem.getPath(commandLine.getArguments().get(3))));
return new TestChildProcess2(0, "");
});
coredumpHandler.processAndReportSingleCoredump(context, coredumpDirectory, Map::of);
verify(coreCollector, never()).collect(any(), any());
verify(coredumpReporter, times(1)).reportCoredump(eq("id-123"), eq("metadata"));
assertFalse(Files.exists(coredumpDirectory));
assertFolderContents(doneCoredumpsPath.resolve("container-123"), "id-123");
assertFolderContents(doneCoredumpsPath.resolve("container-123").resolve("id-123"), "metadata.json", "dump_bash.core.431.lz4");
}
@Test
public void report_enqueued_and_processed_metrics() throws IOException {
Files.createFile(crashPathInContainer.resolve("dump-1"));
Files.createFile(crashPathInContainer.resolve("dump-2"));
Files.createFile(doneCoredumpsPath.resolve("container-123").resolve("dump-3"));
coredumpHandler.updateMetrics(context, crashPathInContainer);
List<DimensionMetrics> updatedMetrics = metrics.getMetricsByType(Metrics.DimensionType.PRETAGGED);
assertEquals(1, updatedMetrics.size());
Map<String, Number> values = updatedMetrics.get(0).getMetrics();
assertEquals(2, values.get("coredumps.enqueued").intValue());
assertEquals(1, values.get("coredumps.processed").intValue());
}
@Before
@After
public void teardown() {
terminal.verifyAllCommandsExecuted();
}
private static void assertFolderContents(Path pathToFolder, String... filenames) {
Set<String> expectedContentsOfFolder = Set.of(filenames);
Set<String> actualContentsOfFolder = new UnixPath(pathToFolder)
.listContentsOfDirectory().stream()
.map(unixPath -> unixPath.toPath().getFileName().toString())
.collect(Collectors.toSet());
assertEquals(expectedContentsOfFolder, actualContentsOfFolder);
}
private static Path createFileAged(Path path, Duration age) {
return uncheck(() -> Files.setLastModifiedTime(
Files.createFile(path),
FileTime.from(Instant.now().minus(age))));
}
} | class CoredumpHandlerTest {
private final FileSystem fileSystem = TestFileSystem.create();
private final Path donePath = fileSystem.getPath("/home/docker/dumps");
private final NodeAgentContext context = new NodeAgentContextImpl.Builder("container-123.domain.tld")
.fileSystem(fileSystem).build();
private final Path crashPathInContainer = fileSystem.getPath("/var/crash");
private final Path doneCoredumpsPath = fileSystem.getPath("/home/docker/dumps");
private final TestTerminal terminal = new TestTerminal();
private final CoreCollector coreCollector = mock(CoreCollector.class);
private final CoredumpReporter coredumpReporter = mock(CoredumpReporter.class);
private final Metrics metrics = new Metrics();
@SuppressWarnings("unchecked")
private final Supplier<String> coredumpIdSupplier = mock(Supplier.class);
private final CoredumpHandler coredumpHandler = new CoredumpHandler(terminal, coreCollector, coredumpReporter,
crashPathInContainer, doneCoredumpsPath, "users", metrics, coredumpIdSupplier);
@Test
public void coredump_enqueue_test() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve(".bash.core.431"), Duration.ZERO);
assertFolderContents(crashPathOnHost, ".bash.core.431");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.empty(), enqueuedPath);
Files.move(crashPathOnHost.resolve(".bash.core.431"), crashPathOnHost.resolve("bash.core.431"));
createFileAged(crashPathOnHost.resolve("vespa-proton.core.119"), Duration.ofMinutes(10));
createFileAged(crashPathOnHost.resolve("vespa-slobrok.core.673"), Duration.ofMinutes(5));
when(coredumpIdSupplier.get()).thenReturn("id-123").thenReturn("id-321");
enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
assertFolderContents(crashPathOnHost, "bash.core.431", "vespa-slobrok.core.673");
assertFolderContents(processingDir, "id-123");
assertFolderContents(processingDir.resolve("id-123"), "dump_vespa-proton.core.119");
verify(coredumpIdSupplier, times(1)).get();
enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-321")), enqueuedPath);
assertFolderContents(crashPathOnHost, "bash.core.431");
assertFolderContents(processingDir, "id-123", "id-321");
assertFolderContents(processingDir.resolve("id-321"), "dump_vespa-slobrok.core.673");
verify(coredumpIdSupplier, times(2)).get();
}
@Test
public void enqueue_with_hs_err_files() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve("java.core.69"), Duration.ofSeconds(15));
createFileAged(crashPathOnHost.resolve("hs_err_pid69.log"), Duration.ofSeconds(20));
createFileAged(crashPathOnHost.resolve("java.core.2420"), Duration.ofSeconds(40));
createFileAged(crashPathOnHost.resolve("hs_err_pid2420.log"), Duration.ofSeconds(49));
createFileAged(crashPathOnHost.resolve("hs_err_pid2421.log"), Duration.ofSeconds(50));
when(coredumpIdSupplier.get()).thenReturn("id-123").thenReturn("id-321");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
assertFolderContents(crashPathOnHost, "hs_err_pid69.log", "java.core.69");
assertFolderContents(processingDir, "id-123");
assertFolderContents(processingDir.resolve("id-123"), "hs_err_pid2420.log", "hs_err_pid2421.log", "dump_java.core.2420");
}
@Test
public void coredump_to_process_test() throws IOException {
final Path crashPathOnHost = fileSystem.getPath("/home/docker/container-1/some/crash/path");
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.empty(), enqueuedPath);
Files.createDirectories(crashPathOnHost);
createFileAged(crashPathOnHost.resolve("bash.core.431"), Duration.ZERO);
createFileAged(crashPathOnHost.resolve("vespa-proton.core.119"), Duration.ofMinutes(10));
createFileAged(crashPathOnHost.resolve("vespa-slobrok.core.673"), Duration.ofMinutes(5));
when(coredumpIdSupplier.get()).thenReturn("id-123");
enqueuedPath = coredumpHandler.getCoredumpToProcess(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
enqueuedPath = coredumpHandler.getCoredumpToProcess(crashPathOnHost, processingDir);
assertEquals(Optional.of(processingDir.resolve("id-123")), enqueuedPath);
verify(coredumpIdSupplier, times(1)).get();
}
@Test
public void get_metadata_test() throws IOException {
Map<String, Object> metadata = new HashMap<>();
metadata.put("bin_path", "/bin/bash");
metadata.put("backtrace", List.of("call 1", "function 2", "something something"));
Map<String, Object> attributes = Map.of(
"hostname", "host123.yahoo.com",
"vespa_version", "6.48.4",
"kernel_version", "3.10.0-862.9.1.el7.x86_64",
"docker_image", "vespa/ci:6.48.4");
String expectedMetadataStr = "{\"fields\":{" +
"\"hostname\":\"host123.yahoo.com\"," +
"\"kernel_version\":\"3.10.0-862.9.1.el7.x86_64\"," +
"\"backtrace\":[\"call 1\",\"function 2\",\"something something\"]," +
"\"vespa_version\":\"6.48.4\"," +
"\"bin_path\":\"/bin/bash\"," +
"\"docker_image\":\"vespa/ci:6.48.4\"" +
"}}";
Path coredumpDirectoryInContainer = Paths.get("/var/crash/id-123");
Path coredumpDirectory = context.pathOnHostFromPathInNode(coredumpDirectoryInContainer);
Files.createDirectories(coredumpDirectory);
Files.createFile(coredumpDirectory.resolve("dump_core.456"));
when(coreCollector.collect(eq(context), eq(coredumpDirectoryInContainer.resolve("dump_core.456"))))
.thenReturn(metadata);
assertEquals(expectedMetadataStr, coredumpHandler.getMetadata(context, coredumpDirectory, () -> attributes));
verify(coreCollector, times(1)).collect(any(), any());
assertEquals(expectedMetadataStr, coredumpHandler.getMetadata(context, coredumpDirectory, () -> attributes));
verify(coreCollector, times(1)).collect(any(), any());
}
@Test(expected = IllegalStateException.class)
public void cant_get_metadata_if_no_core_file() throws IOException {
coredumpHandler.getMetadata(context, fileSystem.getPath("/fake/path"), Map::of);
}
@Test(expected = IllegalStateException.class)
public void fails_to_get_core_file_if_only_compressed() throws IOException {
Path coredumpDirectory = fileSystem.getPath("/path/to/coredump/proccessing/id-123");
Files.createDirectories(coredumpDirectory);
Files.createFile(coredumpDirectory.resolve("dump_bash.core.431.lz4"));
coredumpHandler.findCoredumpFileInProcessingDirectory(coredumpDirectory);
}
@Test
public void process_single_coredump_test() throws IOException {
Path coredumpDirectory = fileSystem.getPath("/path/to/coredump/proccessing/id-123");
Files.createDirectories(coredumpDirectory);
Files.write(coredumpDirectory.resolve("metadata.json"), "metadata".getBytes());
Files.createFile(coredumpDirectory.resolve("dump_bash.core.431"));
assertFolderContents(coredumpDirectory, "metadata.json", "dump_bash.core.431");
terminal.interceptCommand("/usr/bin/lz4 -f /path/to/coredump/proccessing/id-123/dump_bash.core.431 " +
"/path/to/coredump/proccessing/id-123/dump_bash.core.431.lz4 2>&1",
commandLine -> {
uncheck(() -> Files.createFile(fileSystem.getPath(commandLine.getArguments().get(3))));
return new TestChildProcess2(0, "");
});
coredumpHandler.processAndReportSingleCoredump(context, coredumpDirectory, Map::of);
verify(coreCollector, never()).collect(any(), any());
verify(coredumpReporter, times(1)).reportCoredump(eq("id-123"), eq("metadata"));
assertFalse(Files.exists(coredumpDirectory));
assertFolderContents(doneCoredumpsPath.resolve("container-123"), "id-123");
assertFolderContents(doneCoredumpsPath.resolve("container-123").resolve("id-123"), "metadata.json", "dump_bash.core.431.lz4");
}
@Test
public void report_enqueued_and_processed_metrics() throws IOException {
Files.createFile(crashPathInContainer.resolve("dump-1"));
Files.createFile(crashPathInContainer.resolve("dump-2"));
Files.createFile(crashPathInContainer.resolve("hs_err_pid2.log"));
new UnixPath(doneCoredumpsPath.resolve("container-123").resolve("dump-3-folder").resolve("dump-3"))
.createParents()
.createNewFile();
coredumpHandler.updateMetrics(context, crashPathInContainer);
List<DimensionMetrics> updatedMetrics = metrics.getMetricsByType(Metrics.DimensionType.PRETAGGED);
assertEquals(1, updatedMetrics.size());
Map<String, Number> values = updatedMetrics.get(0).getMetrics();
assertEquals(2, values.get("coredumps.enqueued").intValue());
assertEquals(1, values.get("coredumps.processed").intValue());
}
@Before
@After
// Runs both before and after each test so every test starts from (and ends with) a terminal
// whose intercepted commands have all been consumed.
public void teardown() {
terminal.verifyAllCommandsExecuted();
}
// Asserts the directory contains exactly the given file names; listing order is
// unspecified, so the comparison is done on sets.
private static void assertFolderContents(Path pathToFolder, String... filenames) {
Set<String> expected = Set.of(filenames);
Set<String> actual = new UnixPath(pathToFolder)
.listContentsOfDirectory().stream()
.map(UnixPath::toPath)
.map(Path::getFileName)
.map(Path::toString)
.collect(Collectors.toSet());
assertEquals(expected, actual);
}
// Creates the file and back-dates its last-modified time by the given age.
private static Path createFileAged(Path path, Duration age) {
return uncheck(() -> {
Path created = Files.createFile(path);
FileTime mtime = FileTime.from(Instant.now().minus(age));
return Files.setLastModifiedTime(created, mtime);
});
}
} |
The `processing` directory may contain multiple files, at least the core file and `metadata.json`. You probably want to limit max-depth here, but then you'll be off by 1 if there is a coredump being processed right now... | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost) | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | class CoredumpHandler {
// Matches JVM heap dumps (java_pid<pid>.hprof); converge() deletes these rather than reporting them.
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
// Matches JVM fatal-error logs; enqueued alongside the core dump they precede.
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
// Subdirectory of the crash path that holds the dump currently being processed.
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
// Prefix given to the core file when it is moved into its processing directory.
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
// NOTE(review): "Patch" looks like a typo for "Path"; renaming would ripple through the constructors.
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
// Produces the id used both as processing-directory name and report id (UUID in production).
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
// Production constructor: coredump ids are random UUIDs.
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
// Package-private for tests: allows injecting a deterministic coredump id supplier.
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
/** Per-tick entry point: deletes JVM heap dumps, then processes at most one core dump. */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
// JVM heap dumps (java_pid*.hprof) are not reported as core dumps; remove them outright.
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
// At most one core dump is processed per converge pass; metrics are refreshed afterwards.
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
updateMetrics(context, containerCrashPathOnHost);
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
// Resume any dump already moved to processing/ (e.g. interrupted by a restart)
// before enqueueing a new one from the crash directory.
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost))
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
// All non-hidden files directly in the crash directory, oldest first.
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
// Index of the oldest file that is NOT an hs_err log, i.e. the core dump itself.
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
// Only hs_err files (or nothing at all) present: nothing to enqueue.
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
// Move the core dump and every older hs_err file into the new processing directory;
// only the core file itself gets the "dump_" prefix.
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/** Collects metadata for, reports, and archives a single enqueued core dump directory. */
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
// The processing-directory name (from coredumpIdSupplier) doubles as the report id.
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
// Fail loudly; the processing directory stays in place, so the next pass retries it.
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
// No cached metadata: analyze the core file; paths are translated host <-> container.
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
// Persist so a failed report can be retried without re-running the collector.
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link #doneCoredumpsPath}, under a subdirectory named after the container.
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
// Restrict the compressed core to owner + operator group read access.
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
/** @return the uncompressed "dump_"-prefixed core file directly inside the given processing directory */
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
/** Builds the metric dimension set describing this node and its owning application/cluster. */
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
// NOTE(review): unchecked Optional.get() — throws if the node has no owner/membership;
// presumably this only runs for allocated nodes, but confirm at the call sites.
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
// Optional dimensions, present only when the node reports them.
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
updateMetrics(context, containerCrashPathOnHost);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} |
Same problem here, should be `.directories()` instead of `.files()` and limit max-depth to 1 | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath) | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
updateMetrics(context, containerCrashPathOnHost);
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
updateMetrics(context, containerCrashPathOnHost);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} |
Should be changed in `com.yahoo.vespa.hosted.provision.maintenance.Maintainer` too. | static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
long timeUntilNextRun = Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
return timeUntilNextRun;
} | return timeUntilNextRun; | static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
return Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1);
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, name() + "-worker"));
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
} |
This is for number of coredumps in crash path, not the `processing` path. So I'll need to filter out hs_err files as well | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost) | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
updateMetrics(context, containerCrashPathOnHost);
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
updateMetrics(context, containerCrashPathOnHost);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} |
`processing` is inside the crash path https://github.com/vespa-engine/vespa/blob/8bebe88526e449bbca0514d8bdfc87390cb26db4/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java#L87 | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost) | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
updateMetrics(context, containerCrashPathOnHost);
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
updateMetrics(context, containerCrashPathOnHost);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} |
Aha 👍 | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.files(processedCoredumpsPath)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost) | void updateMetrics(NodeAgentContext context, Path containerCrashPathOnHost) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
updateMetrics(context, containerCrashPathOnHost);
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} | class CoredumpHandler {
private static final Pattern JAVA_CORE_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String LZ4_PATH = "/usr/bin/lz4";
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final Terminal terminal;
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
* @param operatorGroupName name of the group that will be set as the owner of the processed coredump
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
operatorGroupName, metrics, () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
this.coredumpIdSupplier = coredumpIdSupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
FileFinder.files(containerCrashPathOnHost)
.match(nameMatches(JAVA_CORE_PATTERN))
.maxDepth(1)
.deleteRecursively(context);
updateMetrics(context, containerCrashPathOnHost);
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<Path> getCoredumpToProcess(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
return FileFinder.directories(containerProcessingPathOnHost).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.or(() -> enqueueCoredump(containerCrashPathOnHost, containerProcessingPathOnHost));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
.match(nameStartsWith(".").negate())
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.collect(Collectors.toList());
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
Path enqueuedDir = uncheck(() -> Files.createDirectories(containerProcessingPathOnHost.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, Path coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!Files.exists(metadataPath.toPath())) {
Path coredumpFilePathOnHost = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path coredumpFilePathInContainer = context.pathInNodeFromPathOnHost(coredumpFilePathOnHost);
Map<String, Object> metadata = coreCollector.collect(context, coredumpFilePathInContainer);
metadata.putAll(nodeAttributesSupplier.get());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
/**
* Compresses core file (and deletes the uncompressed core), then moves the entire core dump processing
* directory to {@link
*/
private void finishProcessing(NodeAgentContext context, Path coredumpDirectory) throws IOException {
Path coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Path compressedCoreFile = coreFile.getParent().resolve(coreFile.getFileName() + ".lz4");
terminal.newCommandLine(context)
.add(LZ4_PATH, "-f", coreFile.toString(), compressedCoreFile.toString())
.setTimeout(Duration.ofMinutes(30))
.execute();
new UnixPath(compressedCoreFile).setGroup(operatorGroupName).setPermissions("rw-r-----");
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory, newCoredumpDirectory.resolve(coredumpDirectory.getFileName()));
}
Path findCoredumpFileInProcessingDirectory(Path coredumpProccessingDirectory) {
return FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(".lz4").negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
ApplicationId owner = node.owner().get();
NodeMembership membership = node.membership().get();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value())
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
.add("clustertype", membership.clusterType())
.add("clusterid", membership.clusterId());
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
} |
Was this one intentional, or was it just temporary testing ? | public MockSearchCluster(String clusterId, DispatchConfig dispatchConfig, int groups, int nodesPerGroup) {
super(clusterId, dispatchConfig, null, null);
ImmutableList.Builder<Group> orderedGroupBuilder = ImmutableList.builder();
ImmutableMap.Builder<Integer, Group> groupBuilder = ImmutableMap.builder();
ImmutableMultimap.Builder<String, Node> hostBuilder = ImmutableMultimap.builder();
int distributionKey = 0;
for (int group = 0; group < groups; group++) {
List<Node> nodes = new ArrayList<>();
for (int node = 0; node < nodesPerGroup; node++) {
Node n = new Node(distributionKey, "host" + distributionKey, group);
nodes.add(n);
hostBuilder.put(n.hostname(), n);
distributionKey++;
}
Group g = new Group(group, nodes);
groupBuilder.put(group, g);
orderedGroupBuilder.add(g);
}
this.orderedGroups = orderedGroupBuilder.build();
this.groups = groupBuilder.build();
this.nodesByHost = hostBuilder.build();
this.numGroups = groups;
this.numNodesPerGroup = nodesPerGroup;
} | nodes.add(n); | public MockSearchCluster(String clusterId, DispatchConfig dispatchConfig, int groups, int nodesPerGroup) {
super(clusterId, dispatchConfig, null, null);
ImmutableList.Builder<Group> orderedGroupBuilder = ImmutableList.builder();
ImmutableMap.Builder<Integer, Group> groupBuilder = ImmutableMap.builder();
ImmutableMultimap.Builder<String, Node> hostBuilder = ImmutableMultimap.builder();
int distributionKey = 0;
for (int group = 0; group < groups; group++) {
List<Node> nodes = new ArrayList<>();
for (int node = 0; node < nodesPerGroup; node++) {
Node n = new Node(distributionKey, "host" + distributionKey, group);
nodes.add(n);
hostBuilder.put(n.hostname(), n);
distributionKey++;
}
Group g = new Group(group, nodes);
groupBuilder.put(group, g);
orderedGroupBuilder.add(g);
}
this.orderedGroups = orderedGroupBuilder.build();
this.groups = groupBuilder.build();
this.nodesByHost = hostBuilder.build();
this.numGroups = groups;
this.numNodesPerGroup = nodesPerGroup;
} | class MockSearchCluster extends SearchCluster {
private final int numGroups;
private final int numNodesPerGroup;
private final ImmutableList<Group> orderedGroups;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
public MockSearchCluster(String clusterId, int groups, int nodesPerGroup) {
this(clusterId, createDispatchConfig(), groups, nodesPerGroup);
}
@Override
public ImmutableList<Group> orderedGroups() {
return orderedGroups;
}
@Override
public int size() {
return numGroups * numNodesPerGroup;
}
@Override
public ImmutableMap<Integer, Group> groups() {
return groups;
}
@Override
public int groupSize() {
return numNodesPerGroup;
}
@Override
public int groupsWithSufficientCoverage() {
return numGroups;
}
@Override
public Optional<Group> group(int n) {
if (n < numGroups) {
return Optional.of(groups.get(n));
} else {
return Optional.empty();
}
}
@Override
public Optional<Node> localCorpusDispatchTarget() {
return Optional.empty();
}
@Override
public void working(Node node) {
node.setWorking(true);
}
@Override
public void failed(Node node) {
node.setWorking(false);
}
public static DispatchConfig createDispatchConfig(Node... nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(List<Node> nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, Node... nodes) {
return createDispatchConfig(minSearchCoverage, Arrays.asList(nodes));
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, List<Node> nodes) {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
if (minSearchCoverage < 100.0) {
builder.minWaitAfterCoverageFactor(0);
builder.maxWaitAfterCoverageFactor(0.5);
}
int port = 10000;
for (Node n : nodes) {
builder.node(new DispatchConfig.Node.Builder().key(n.key()).host(n.hostname()).port(port++).group(n.group()));
}
return new DispatchConfig(builder);
}
} | class MockSearchCluster extends SearchCluster {
private final int numGroups;
private final int numNodesPerGroup;
private final ImmutableList<Group> orderedGroups;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
public MockSearchCluster(String clusterId, int groups, int nodesPerGroup) {
this(clusterId, createDispatchConfig(), groups, nodesPerGroup);
}
@Override
public ImmutableList<Group> orderedGroups() {
return orderedGroups;
}
@Override
public int size() {
return numGroups * numNodesPerGroup;
}
@Override
public ImmutableMap<Integer, Group> groups() {
return groups;
}
@Override
public int groupSize() {
return numNodesPerGroup;
}
@Override
public int groupsWithSufficientCoverage() {
return numGroups;
}
@Override
public Optional<Group> group(int n) {
if (n < numGroups) {
return Optional.of(groups.get(n));
} else {
return Optional.empty();
}
}
@Override
public Optional<Node> localCorpusDispatchTarget() {
return Optional.empty();
}
@Override
public void working(Node node) {
node.setWorking(true);
}
@Override
public void failed(Node node) {
node.setWorking(false);
}
public static DispatchConfig createDispatchConfig(Node... nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(List<Node> nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, Node... nodes) {
return createDispatchConfig(minSearchCoverage, Arrays.asList(nodes));
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, List<Node> nodes) {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
if (minSearchCoverage < 100.0) {
builder.minWaitAfterCoverageFactor(0);
builder.maxWaitAfterCoverageFactor(0.5);
}
int port = 10000;
for (Node n : nodes) {
builder.node(new DispatchConfig.Node.Builder().key(n.key()).host(n.hostname()).port(port++).group(n.group()));
}
return new DispatchConfig(builder);
}
} |
Intentional, as it reproduces the true initial state (which would have surfaced this bug). | public MockSearchCluster(String clusterId, DispatchConfig dispatchConfig, int groups, int nodesPerGroup) {
super(clusterId, dispatchConfig, null, null);
ImmutableList.Builder<Group> orderedGroupBuilder = ImmutableList.builder();
ImmutableMap.Builder<Integer, Group> groupBuilder = ImmutableMap.builder();
ImmutableMultimap.Builder<String, Node> hostBuilder = ImmutableMultimap.builder();
int distributionKey = 0;
for (int group = 0; group < groups; group++) {
List<Node> nodes = new ArrayList<>();
for (int node = 0; node < nodesPerGroup; node++) {
Node n = new Node(distributionKey, "host" + distributionKey, group);
nodes.add(n);
hostBuilder.put(n.hostname(), n);
distributionKey++;
}
Group g = new Group(group, nodes);
groupBuilder.put(group, g);
orderedGroupBuilder.add(g);
}
this.orderedGroups = orderedGroupBuilder.build();
this.groups = groupBuilder.build();
this.nodesByHost = hostBuilder.build();
this.numGroups = groups;
this.numNodesPerGroup = nodesPerGroup;
} | nodes.add(n); | public MockSearchCluster(String clusterId, DispatchConfig dispatchConfig, int groups, int nodesPerGroup) {
super(clusterId, dispatchConfig, null, null);
ImmutableList.Builder<Group> orderedGroupBuilder = ImmutableList.builder();
ImmutableMap.Builder<Integer, Group> groupBuilder = ImmutableMap.builder();
ImmutableMultimap.Builder<String, Node> hostBuilder = ImmutableMultimap.builder();
int distributionKey = 0;
for (int group = 0; group < groups; group++) {
List<Node> nodes = new ArrayList<>();
for (int node = 0; node < nodesPerGroup; node++) {
Node n = new Node(distributionKey, "host" + distributionKey, group);
nodes.add(n);
hostBuilder.put(n.hostname(), n);
distributionKey++;
}
Group g = new Group(group, nodes);
groupBuilder.put(group, g);
orderedGroupBuilder.add(g);
}
this.orderedGroups = orderedGroupBuilder.build();
this.groups = groupBuilder.build();
this.nodesByHost = hostBuilder.build();
this.numGroups = groups;
this.numNodesPerGroup = nodesPerGroup;
} | class MockSearchCluster extends SearchCluster {
private final int numGroups;
private final int numNodesPerGroup;
private final ImmutableList<Group> orderedGroups;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
public MockSearchCluster(String clusterId, int groups, int nodesPerGroup) {
this(clusterId, createDispatchConfig(), groups, nodesPerGroup);
}
@Override
public ImmutableList<Group> orderedGroups() {
return orderedGroups;
}
@Override
public int size() {
return numGroups * numNodesPerGroup;
}
@Override
public ImmutableMap<Integer, Group> groups() {
return groups;
}
@Override
public int groupSize() {
return numNodesPerGroup;
}
@Override
public int groupsWithSufficientCoverage() {
return numGroups;
}
@Override
public Optional<Group> group(int n) {
if (n < numGroups) {
return Optional.of(groups.get(n));
} else {
return Optional.empty();
}
}
@Override
public Optional<Node> localCorpusDispatchTarget() {
return Optional.empty();
}
@Override
public void working(Node node) {
node.setWorking(true);
}
@Override
public void failed(Node node) {
node.setWorking(false);
}
public static DispatchConfig createDispatchConfig(Node... nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(List<Node> nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, Node... nodes) {
return createDispatchConfig(minSearchCoverage, Arrays.asList(nodes));
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, List<Node> nodes) {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
if (minSearchCoverage < 100.0) {
builder.minWaitAfterCoverageFactor(0);
builder.maxWaitAfterCoverageFactor(0.5);
}
int port = 10000;
for (Node n : nodes) {
builder.node(new DispatchConfig.Node.Builder().key(n.key()).host(n.hostname()).port(port++).group(n.group()));
}
return new DispatchConfig(builder);
}
} | class MockSearchCluster extends SearchCluster {
private final int numGroups;
private final int numNodesPerGroup;
private final ImmutableList<Group> orderedGroups;
private final ImmutableMap<Integer, Group> groups;
private final ImmutableMultimap<String, Node> nodesByHost;
public MockSearchCluster(String clusterId, int groups, int nodesPerGroup) {
this(clusterId, createDispatchConfig(), groups, nodesPerGroup);
}
@Override
public ImmutableList<Group> orderedGroups() {
return orderedGroups;
}
@Override
public int size() {
return numGroups * numNodesPerGroup;
}
@Override
public ImmutableMap<Integer, Group> groups() {
return groups;
}
@Override
public int groupSize() {
return numNodesPerGroup;
}
@Override
public int groupsWithSufficientCoverage() {
return numGroups;
}
@Override
public Optional<Group> group(int n) {
if (n < numGroups) {
return Optional.of(groups.get(n));
} else {
return Optional.empty();
}
}
@Override
public Optional<Node> localCorpusDispatchTarget() {
return Optional.empty();
}
@Override
public void working(Node node) {
node.setWorking(true);
}
@Override
public void failed(Node node) {
node.setWorking(false);
}
public static DispatchConfig createDispatchConfig(Node... nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(List<Node> nodes) {
return createDispatchConfig(100.0, nodes);
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, Node... nodes) {
return createDispatchConfig(minSearchCoverage, Arrays.asList(nodes));
}
public static DispatchConfig createDispatchConfig(double minSearchCoverage, List<Node> nodes) {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
if (minSearchCoverage < 100.0) {
builder.minWaitAfterCoverageFactor(0);
builder.maxWaitAfterCoverageFactor(0.5);
}
int port = 10000;
for (Node n : nodes) {
builder.node(new DispatchConfig.Node.Builder().key(n.key()).host(n.hostname()).port(port++).group(n.group()));
}
return new DispatchConfig(builder);
}
} |
Use `name()` in case maintainer has specified a custom one. | public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, getClass().getSimpleName() + "-worker"));
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
} | service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, getClass().getSimpleName() + "-worker")); | public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, name() + "-worker"));
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
long timeUntilNextRun = Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
return timeUntilNextRun;
}
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
return Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
}
} |
😁 | protected void maintain() {
TenantController tenants = controller().tenants();
for (Tenant tenant : tenants.asList()) {
log.log(INFO, "Updating contact information for " + tenant);
try {
switch (tenant.type()) {
case athenz:
tenants.lockIfPresent(tenant.name(), LockedTenant.Athenz.class, lockedTenant -> {
Contact contact = contactRetriever.getContact(lockedTenant.get().propertyId());
log.log(INFO, "Contact found for " + tenant + " was " +
(Optional.of(contact).equals(tenant.contact()) ? "un" : "") + "changed");
tenants.store(lockedTenant.with(contact));
});
break;
case cloud:
break;
default:
throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to update contact information for " + tenant + ": " +
Exceptions.toMessageString(e) + ". Retrying in " +
maintenanceInterval());
}
}
} | break; | protected void maintain() {
TenantController tenants = controller().tenants();
for (Tenant tenant : tenants.asList()) {
log.log(INFO, "Updating contact information for " + tenant);
try {
switch (tenant.type()) {
case athenz:
tenants.lockIfPresent(tenant.name(), LockedTenant.Athenz.class, lockedTenant -> {
Contact contact = contactRetriever.getContact(lockedTenant.get().propertyId());
log.log(INFO, "Contact found for " + tenant + " was " +
(Optional.of(contact).equals(tenant.contact()) ? "un" : "") + "changed");
tenants.store(lockedTenant.with(contact));
});
break;
case cloud:
break;
default:
throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to update contact information for " + tenant + ": " +
Exceptions.toMessageString(e) + ". Retrying in " +
maintenanceInterval());
}
}
} | class ContactInformationMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(ContactInformationMaintainer.class.getName());
private final ContactRetriever contactRetriever;
public ContactInformationMaintainer(Controller controller, Duration interval, JobControl jobControl) {
super(controller, interval, jobControl, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.contactRetriever = controller.serviceRegistry().contactRetriever();
}
@Override
} | class ContactInformationMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(ContactInformationMaintainer.class.getName());
private final ContactRetriever contactRetriever;
public ContactInformationMaintainer(Controller controller, Duration interval, JobControl jobControl) {
super(controller, interval, jobControl, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.contactRetriever = controller.serviceRegistry().contactRetriever();
}
@Override
} |
```suggestion service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, name() + "-worker")); ``` | public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, getClass().getSimpleName() + "-worker"));
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
} | service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, getClass().getSimpleName() + "-worker")); | public Maintainer(Controller controller, Duration interval, JobControl jobControl, String name, Set<SystemName> activeSystems) {
if (interval.isNegative() || interval.isZero())
throw new IllegalArgumentException("Interval must be positive, but was " + interval);
this.controller = controller;
this.maintenanceInterval = interval;
this.jobControl = jobControl;
this.name = name;
this.activeSystems = Set.copyOf(activeSystems);
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, name() + "-worker"));
long delay = staggeredDelay(controller.curator().cluster(), controller.hostname(), controller.clock().instant(), interval);
service.scheduleAtFixedRate(this, delay, interval.toMillis(), TimeUnit.MILLISECONDS);
jobControl.started(name());
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
long timeUntilNextRun = Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
return timeUntilNextRun;
}
} | class Maintainer extends AbstractComponent implements Runnable {
protected static final Logger log = Logger.getLogger(Maintainer.class.getName());
private final Controller controller;
private final Duration maintenanceInterval;
private final JobControl jobControl;
private final ScheduledExecutorService service;
private final String name;
/** The systems in which this maintainer should run */
private final Set<SystemName> activeSystems;
public Maintainer(Controller controller, Duration interval, JobControl jobControl) {
this(controller, interval, jobControl, null, EnumSet.allOf(SystemName.class));
}
protected Controller controller() { return controller; }
@Override
public void run() {
try {
if ( ! activeSystems.contains(controller.system())) {
return;
}
if (jobControl.isActive(name())) {
try (Lock lock = jobControl.curator().lockMaintenanceJob(name())) {
maintain();
}
}
}
catch (TimeoutException e) {
}
catch (Throwable t) {
log.log(Level.WARNING, "Maintainer " + name() + " failed. Will retry in " +
maintenanceInterval + ": " + Exceptions.toMessageString(t));
}
}
@Override
public void deconstruct() {
var timeout = Duration.ofSeconds(30);
service.shutdown();
try {
if (!service.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Maintainer " + name() + " failed to shutdown " +
"within " + timeout);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
/** Called once each time this maintenance job should run */
protected abstract void maintain();
public Duration maintenanceInterval() { return maintenanceInterval; }
public final String name() {
return name == null ? this.getClass().getSimpleName() : name;
}
/** Returns the name of this */
@Override
public final String toString() {
return name();
}
static long staggeredDelay(List<HostName> cluster, HostName host, Instant now, Duration interval) {
if ( ! cluster.contains(host))
return interval.toMillis();
long offset = cluster.indexOf(host) * interval.toMillis() / cluster.size();
return Math.floorMod(offset - now.toEpochMilli(), interval.toMillis());
}
} |
Done | public void testBasics() throws Exception {
StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>\n");
assertEquals(1, storage.getChildren().size());
{
StorServerConfig.Builder builder = new StorServerConfig.Builder();
storage.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(false, config.is_distributor());
assertEquals("foofighters", config.cluster_name());
}
{
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
assertFalse(config.mbus().dispatch_on_encode());
}
} | assertFalse(config.mbus().dispatch_on_encode()); | public void testBasics() {
StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>\n");
assertEquals(1, storage.getChildren().size());
StorServerConfig.Builder builder = new StorServerConfig.Builder();
storage.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertFalse(config.is_distributor());
assertEquals("foofighters", config.cluster_name());
} | class StorageClusterTest {
StorageCluster parse(String xml, Flavor flavor) {
MockRoot root = new MockRoot("", new DeployState.Builder()
.applicationPackage(new MockApplicationPackage.Builder().build())
.modelHostProvisioner(new SingleNodeProvisioner(flavor)).build());
return parse(xml, root);
}
StorageCluster parse(String xml, Flavor flavor, ModelContext.Properties properties) {
MockRoot root = new MockRoot("", new DeployState.Builder()
.applicationPackage(new MockApplicationPackage.Builder().build())
.modelHostProvisioner(new SingleNodeProvisioner(flavor))
.properties(properties).build());
return parse(xml, root);
}
StorageCluster parse(String xml) {
MockRoot root = new MockRoot();
return parse(xml, root);
}
StorageCluster parse(String xml, MockRoot root) {
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
);
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("movies"))
);
ContentCluster cluster = ContentClusterUtils.createCluster(xml, root);
root.freezeModelTopology();
return cluster.getStorageNodes();
}
@Test
@Test
public void testMerges() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
parse("" +
"<content id=\"foofighters\">\n" +
" <documents/>" +
" <tuning>" +
" <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>\n" +
" </tuning>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>"
).getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1024, config.max_merges_per_node());
assertEquals(1024*10, config.max_merge_queue_size());
}
@Test
public void testVisitors() throws Exception {
StorVisitorConfig.Builder builder = new StorVisitorConfig.Builder();
parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <visitors thread-count=\"7\" max-queue-size=\"1000\">\n" +
" <max-concurrent fixed=\"42\" variable=\"100\"/>\n" +
" </visitors>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>"
).getConfig(builder);
StorVisitorConfig config = new StorVisitorConfig(builder);
assertEquals(42, config.maxconcurrentvisitors_fixed());
assertEquals(100, config.maxconcurrentvisitors_variable());
assertEquals(7, config.visitorthreads());
assertEquals(1000, config.maxvisitorqueuesize());
}
@Test
public void testPersistenceThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads count=\"7\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(7, config.num_threads());
assertFalse(config.enable_multibit_split_optimalization());
assertEquals(1, config.num_response_threads());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(7, config.num_threads());
}
}
@Test
public void testResponseThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads count=\"7\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(1, config.num_response_threads());
assertEquals(7, config.num_threads());
}
@Test
public void testPersistenceThreadsOld() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads>\n" +
" <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>\n" +
" <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>\n" +
" <thread count=\"1\"/>\n" +
" </persistence-threads>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(4, config.num_threads());
assertFalse(config.enable_multibit_split_optimalization());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(4, config.num_threads());
}
}
@Test
public void testNoPersistenceThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(8, config.num_threads());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(9, config.num_threads());
}
}
@Test
public void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>"
).getConfig(builder);
StorIntegritycheckerConfig config = new StorIntegritycheckerConfig(builder);
assertEquals("-------", config.weeklycycle());
}
@Test
public void testCapacity() {
String xml =
"<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>\n" +
" <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
for (int i = 0; i < 3; ++i) {
StorageNode node = cluster.getStorageNodes().getChildren().get("" + i);
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
node.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1.0 + (double)i * 0.5, config.node_capacity(), 0.001);
}
}
@Test
public void testRootFolder() {
String xml =
"<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
StorageNode node = cluster.getStorageNodes().getChildren().get("0");
{
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
node.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(getDefaults().underVespaHome("var/db/vespa/search/storage/storage/0"), config.root_folder());
}
{
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getDistributorNodes().getConfig(builder);
cluster.getDistributorNodes().getChildren().get("0").getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(getDefaults().underVespaHome("var/db/vespa/search/storage/distributor/0"), config.root_folder());
}
}
@Test
public void testGenericPersistenceTuning() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>" +
"<engine>\n" +
" <fail-partition-on-error>true</fail-partition-on-error>\n" +
" <revert-time>34m</revert-time>\n" +
" <recovery-time>5d</recovery-time>\n" +
"</engine>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
PersistenceConfig.Builder builder = new PersistenceConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
PersistenceConfig config = new PersistenceConfig(builder);
assertTrue(config.fail_partition_on_error());
assertEquals(34 * 60, config.revert_time_period());
assertEquals(5 * 24 * 60 * 60, config.keep_remove_time_period());
}
@Test
public void requireThatUserDoesNotSpecifyBothGroupAndNodes() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>\n" +
"<engine>\n" +
" <fail-partition-on-error>true</fail-partition-on-error>\n" +
" <revert-time>34m</revert-time>\n" +
" <recovery-time>5d</recovery-time>\n" +
"</engine>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <nodes>\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </nodes>\n" +
"</cluster>";
try {
final MockRoot root = new MockRoot();
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
);
ContentClusterUtils.createCluster(xml, root);
fail("Did not fail when having both group and nodes");
} catch (RuntimeException e) {
e.printStackTrace();
assertEquals("Both group and nodes exists, only one of these tags is legal", e.getMessage());
}
}
@Test
public void requireThatGroupNamesMustBeUniqueAmongstSiblings() {
String xml =
"<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
" <documents/>\n" +
" <group>\n" +
" <distribution partitions=\"*\"/>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
fail("Did not get exception with duplicate group names");
} catch (RuntimeException e) {
assertEquals("Cluster 'storage' has multiple groups with name 'bar' in the same subgroup. " +
"Group sibling names must be unique.", e.getMessage());
}
}
@Test
public void requireThatGroupNamesCanBeDuplicatedAcrossLevels() {
String xml =
"<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
"<documents/>\n" +
" <group>\n" +
" <distribution partitions=\"*\"/>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <group distribution-key=\"0\" name=\"foo\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"foo\">\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
ContentClusterUtils.createCluster(xml, new MockRoot());
}
@Test
public void requireThatNestedGroupsRequireDistribution() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>\n" +
" <group>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"baz\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
fail("Did not get exception with missing distribution element");
} catch (RuntimeException e) {
assertEquals("'distribution' attribute is required with multiple subgroups", e.getMessage());
}
}
} | class StorageClusterTest {
StorageCluster parse(String xml, Flavor flavor) {
MockRoot root = new MockRoot("", new DeployState.Builder()
.applicationPackage(new MockApplicationPackage.Builder().build())
.modelHostProvisioner(new SingleNodeProvisioner(flavor)).build());
return parse(xml, root);
}
StorageCluster parse(String xml) {
MockRoot root = new MockRoot();
return parse(xml, root);
}
StorageCluster parse(String xml, MockRoot root) {
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
);
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("movies"))
);
ContentCluster cluster = ContentClusterUtils.createCluster(xml, root);
root.freezeModelTopology();
return cluster.getStorageNodes();
}
@Test
@Test
public void testCommunicationManagerDefaults() {
StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>\n");
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
assertFalse(config.mbus().dispatch_on_encode());
assertFalse(config.mbus().dispatch_on_decode());
assertEquals(4, config.mbus().num_threads());
assertEquals(StorCommunicationmanagerConfig.Mbus.Optimize_for.LATENCY, config.mbus().optimize_for());
}
@Test
public void testMerges() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
parse("" +
"<content id=\"foofighters\">\n" +
" <documents/>" +
" <tuning>" +
" <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>\n" +
" </tuning>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>"
).getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1024, config.max_merges_per_node());
assertEquals(1024*10, config.max_merge_queue_size());
}
@Test
public void testVisitors() {
StorVisitorConfig.Builder builder = new StorVisitorConfig.Builder();
parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <visitors thread-count=\"7\" max-queue-size=\"1000\">\n" +
" <max-concurrent fixed=\"42\" variable=\"100\"/>\n" +
" </visitors>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>"
).getConfig(builder);
StorVisitorConfig config = new StorVisitorConfig(builder);
assertEquals(42, config.maxconcurrentvisitors_fixed());
assertEquals(100, config.maxconcurrentvisitors_variable());
assertEquals(7, config.visitorthreads());
assertEquals(1000, config.maxvisitorqueuesize());
}
@Test
public void testPersistenceThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads count=\"7\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(7, config.num_threads());
assertFalse(config.enable_multibit_split_optimalization());
assertEquals(1, config.num_response_threads());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(7, config.num_threads());
}
}
@Test
public void testResponseThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads count=\"7\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(1, config.num_response_threads());
assertEquals(7, config.num_threads());
}
@Test
public void testPersistenceThreadsOld() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" <persistence-threads>\n" +
" <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>\n" +
" <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>\n" +
" <thread count=\"1\"/>\n" +
" </persistence-threads>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(4, config.num_threads());
assertFalse(config.enable_multibit_split_optimalization());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(4, config.num_threads());
}
}
@Test
public void testNoPersistenceThreads() {
StorageCluster stc = parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <tuning>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
{
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
stc.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(8, config.num_threads());
}
{
assertEquals(1, stc.getChildren().size());
StorageNode sn = stc.getChildren().values().iterator().next();
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
sn.getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(9, config.num_threads());
}
}
@Test
public void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>"
).getConfig(builder);
StorIntegritycheckerConfig config = new StorIntegritycheckerConfig(builder);
assertEquals("-------", config.weeklycycle());
}
@Test
public void testCapacity() {
String xml =
"<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>\n" +
" <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
for (int i = 0; i < 3; ++i) {
StorageNode node = cluster.getStorageNodes().getChildren().get("" + i);
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
node.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1.0 + (double)i * 0.5, config.node_capacity(), 0.001);
}
}
@Test
public void testRootFolder() {
String xml =
"<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
StorageNode node = cluster.getStorageNodes().getChildren().get("0");
{
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
node.getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(getDefaults().underVespaHome("var/db/vespa/search/storage/storage/0"), config.root_folder());
}
{
StorServerConfig.Builder builder = new StorServerConfig.Builder();
cluster.getDistributorNodes().getConfig(builder);
cluster.getDistributorNodes().getChildren().get("0").getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(getDefaults().underVespaHome("var/db/vespa/search/storage/distributor/0"), config.root_folder());
}
}
@Test
public void testGenericPersistenceTuning() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>" +
"<engine>\n" +
" <fail-partition-on-error>true</fail-partition-on-error>\n" +
" <revert-time>34m</revert-time>\n" +
" <recovery-time>5d</recovery-time>\n" +
"</engine>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
"</cluster>";
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
PersistenceConfig.Builder builder = new PersistenceConfig.Builder();
cluster.getStorageNodes().getConfig(builder);
PersistenceConfig config = new PersistenceConfig(builder);
assertTrue(config.fail_partition_on_error());
assertEquals(34 * 60, config.revert_time_period());
assertEquals(5 * 24 * 60 * 60, config.keep_remove_time_period());
}
@Test
public void requireThatUserDoesNotSpecifyBothGroupAndNodes() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>\n" +
"<engine>\n" +
" <fail-partition-on-error>true</fail-partition-on-error>\n" +
" <revert-time>34m</revert-time>\n" +
" <recovery-time>5d</recovery-time>\n" +
"</engine>" +
" <group>\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <nodes>\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </nodes>\n" +
"</cluster>";
try {
final MockRoot root = new MockRoot();
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
);
ContentClusterUtils.createCluster(xml, root);
fail("Did not fail when having both group and nodes");
} catch (RuntimeException e) {
e.printStackTrace();
assertEquals("Both group and nodes exists, only one of these tags is legal", e.getMessage());
}
}
@Test
public void requireThatGroupNamesMustBeUniqueAmongstSiblings() {
String xml =
"<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
" <documents/>\n" +
" <group>\n" +
" <distribution partitions=\"*\"/>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
fail("Did not get exception with duplicate group names");
} catch (RuntimeException e) {
assertEquals("Cluster 'storage' has multiple groups with name 'bar' in the same subgroup. " +
"Group sibling names must be unique.", e.getMessage());
}
}
@Test
public void requireThatGroupNamesCanBeDuplicatedAcrossLevels() {
String xml =
"<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
"<documents/>\n" +
" <group>\n" +
" <distribution partitions=\"*\"/>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <group distribution-key=\"0\" name=\"foo\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"foo\">\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
ContentClusterUtils.createCluster(xml, new MockRoot());
}
@Test
public void requireThatNestedGroupsRequireDistribution() {
String xml =
"<cluster id=\"storage\">\n" +
"<documents/>\n" +
" <group>\n" +
" <group distribution-key=\"0\" name=\"bar\">\n" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" <group distribution-key=\"0\" name=\"baz\">\n" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
" </group>\n" +
" </group>\n" +
"</cluster>";
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
fail("Did not get exception with missing distribution element");
} catch (RuntimeException e) {
assertEquals("'distribution' attribute is required with multiple subgroups", e.getMessage());
}
}
} |
```suggestion ``` | public void testApplicationSerialization() {
List<Cluster> clusters = new ArrayList<>();
clusters.add(new Cluster(ClusterSpec.Id.from("c1"),
new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(12, 6, new NodeResources(3, 6, 21, 24)),
Optional.empty()));
clusters.add(new Cluster(ClusterSpec.Id.from("c2"),
new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(14, 7, new NodeResources(3, 6, 21, 24)),
Optional.of(new ClusterResources(10, 5, new NodeResources(2, 4, 14, 16)))));
Application original = new Application(ApplicationId.from("myTenant", "myApplication", "myInstance"),
clusters);
Application serialized = ApplicationSerializer.fromJson(ApplicationSerializer.toJson(original));
assertNotSame(original, serialized);
System.out.println("original id: " + original.id());
System.out.println("serialized id: " + serialized.id());
assertEquals(original, serialized);
assertEquals(original.id(), serialized.id());
assertEquals(original.clusters(), serialized.clusters());
for (Cluster originalCluster : original.clusters().values()) {
Cluster serializedCluster = serialized.clusters().get(originalCluster.id());
assertNotNull(serializedCluster);
assertNotSame(originalCluster, serializedCluster);
assertEquals(originalCluster, serializedCluster);
assertEquals(originalCluster.id(), serializedCluster.id());
assertEquals(originalCluster.minResources(), serializedCluster.minResources());
assertEquals(originalCluster.maxResources(), serializedCluster.maxResources());
assertEquals(originalCluster.targetResources(), serializedCluster.targetResources());
}
} | System.out.println("serialized id: " + serialized.id()); | public void testApplicationSerialization() {
List<Cluster> clusters = new ArrayList<>();
clusters.add(new Cluster(ClusterSpec.Id.from("c1"),
new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(12, 6, new NodeResources(3, 6, 21, 24)),
Optional.empty()));
clusters.add(new Cluster(ClusterSpec.Id.from("c2"),
new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(14, 7, new NodeResources(3, 6, 21, 24)),
Optional.of(new ClusterResources(10, 5, new NodeResources(2, 4, 14, 16)))));
Application original = new Application(ApplicationId.from("myTenant", "myApplication", "myInstance"),
clusters);
Application serialized = ApplicationSerializer.fromJson(ApplicationSerializer.toJson(original));
assertNotSame(original, serialized);
assertEquals(original, serialized);
assertEquals(original.id(), serialized.id());
assertEquals(original.clusters(), serialized.clusters());
for (Cluster originalCluster : original.clusters().values()) {
Cluster serializedCluster = serialized.clusters().get(originalCluster.id());
assertNotNull(serializedCluster);
assertNotSame(originalCluster, serializedCluster);
assertEquals(originalCluster, serializedCluster);
assertEquals(originalCluster.id(), serializedCluster.id());
assertEquals(originalCluster.minResources(), serializedCluster.minResources());
assertEquals(originalCluster.maxResources(), serializedCluster.maxResources());
assertEquals(originalCluster.targetResources(), serializedCluster.targetResources());
}
} | class ApplicationSerializerTest {
@Test
} | class ApplicationSerializerTest {
@Test
} |
Is this relevant for e.g. metrics-proxy, clustercontroller and logserver? If not, it should be moved to ApplicationContainerCluster. | public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId);
this.name = name;
this.isHostedVespa = stateIsHosted(deployState);
this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone();
componentGroup = new ComponentGroup<>(this, "component");
addComponent(new StatisticsComponent());
addSimpleComponent(AccessLog.class);
addSimpleComponent(ThreadPoolProvider.class);
addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class);
addSimpleComponent(SecurityFilterInvoker.class);
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater");
addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class);
addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor");
addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory");
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent("ai.vespa.cloud.SystemInfo");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addJaxProviders();
} | addSimpleComponent("ai.vespa.cloud.SystemInfo"); | public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId);
this.name = name;
this.isHostedVespa = stateIsHosted(deployState);
this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone();
componentGroup = new ComponentGroup<>(this, "component");
addComponent(new StatisticsComponent());
addSimpleComponent(AccessLog.class);
addSimpleComponent(ThreadPoolProvider.class);
addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class);
addSimpleComponent(SecurityFilterInvoker.class);
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater");
addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class);
addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor");
addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory");
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addJaxProviders();
} | class ContainerCluster<CONTAINER extends Container>
extends AbstractConfigProducer<AbstractConfigProducer<?>>
implements
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
DocumentmanagerConfig.Producer,
ContainerDocumentConfig.Producer,
HealthMonitorConfig.Producer,
ApplicationMetadataConfig.Producer,
BundlesConfig.Producer,
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
SchemamappingConfig.Producer,
QrSearchersConfig.Producer,
QrStartConfig.Producer,
QueryProfilesConfig.Producer,
PageTemplatesConfig.Producer,
SemanticRulesConfig.Producer,
DocprocConfig.Producer,
ClusterInfoConfig.Producer,
RoutingProviderConfig.Producer,
ConfigserverConfig.Producer,
ThreadpoolConfig.Producer
{
/**
* URI prefix used for internal, usually programmatic, APIs. URIs using this
* prefix should never considered available for direct use by customers, and
* normal compatibility concerns only applies to libraries using the URIs in
* question, not contents served from the URIs themselves.
*/
public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use";
public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler";
public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName();
public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName();
public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider";
public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1";
public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15";
public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler";
public static final String STATE_HANDLER_BINDING_1 = "http:
public static final String STATE_HANDLER_BINDING_2 = STATE_HANDLER_BINDING_1 + "/*";
public static final String ROOT_HANDLER_PATH = "/";
public static final String ROOT_HANDLER_BINDING = "http:
public static final String VIP_HANDLER_BINDING = "http:
private final String name;
protected List<CONTAINER> containers = new ArrayList<>();
private Http http;
private ProcessingChains processingChains;
private ContainerSearch containerSearch;
private ContainerDocproc containerDocproc;
private ContainerDocumentApi containerDocumentApi;
private SecretStore secretStore;
private boolean rpcServerEnabled = true;
private boolean httpServerEnabled = true;
private final Set<Path> platformBundles = new LinkedHashSet<>();
private final List<String> serviceAliases = new ArrayList<>();
private final List<String> endpointAliases = new ArrayList<>();
private final ComponentGroup<Component<?, ?>> componentGroup;
private final boolean isHostedVespa;
private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>();
private ApplicationMetaData applicationMetaData = null;
/** The zone this is deployed in, or the default zone if not on hosted Vespa */
private Zone zone;
private String hostClusterId = null;
private String jvmGCOptions = null;
private String environmentVars = null;
public void setZone(Zone zone) {
this.zone = zone;
}
public Zone getZone() {
return zone;
}
public void addDefaultHandlersWithVip() {
addDefaultHandlersExceptStatus();
addVipHandler();
}
public final void addDefaultHandlersExceptStatus() {
addDefaultRootHandler();
addMetricStateHandler();
addApplicationStatusHandler();
}
public void addMetricStateHandler() {
Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>(
new ComponentModel(STATE_HANDLER_CLASS, null, null, null));
stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2);
addComponent(stateHandler);
}
public void addDefaultRootHandler() {
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getFromStrings(
BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null));
handler.addServerBindings(ROOT_HANDLER_BINDING);
addComponent(handler);
}
public void addApplicationStatusHandler() {
Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings(
APPLICATION_STATUS_HANDLER_CLASS, null), null));
statusHandler.addServerBindings("http:
addComponent(statusHandler);
}
public void addVipHandler() {
Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS);
vipHandler.addServerBindings(VIP_HANDLER_BINDING);
addComponent(vipHandler);
}
@SuppressWarnings("deprecation")
private void addJaxProviders() {
addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class);
}
public final void addComponent(Component<?, ?> component) {
componentGroup.addComponent(component);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
/**
* Removes a component by id
*
* @return the removed component, or null if it was not present
*/
@SuppressWarnings("unused")
public Component removeComponent(ComponentId componentId) {
return componentGroup.removeComponent(componentId);
}
private void addSimpleComponent(Class<?> clazz) {
addSimpleComponent(clazz.getName());
}
protected void addSimpleComponent(String className) {
addComponent(new SimpleComponent(className));
}
public void prepare(DeployState deployState) {
applicationMetaData = deployState.getApplicationPackage().getMetaData();
doPrepare(deployState);
}
protected abstract void doPrepare(DeployState deployState);
public String getName() {
return name;
}
public List<CONTAINER> getContainers() {
return Collections.unmodifiableList(containers);
}
public void addContainer(CONTAINER container) {
container.setClusterName(name);
container.setProp("clustername", name)
.setProp("index", this.containers.size());
containers.add(container);
}
public void addContainers(Collection<CONTAINER> containers) {
containers.forEach(this::addContainer);
}
public void setProcessingChains(ProcessingChains processingChains, String... serverBindings) {
if (this.processingChains != null)
throw new IllegalStateException("ProcessingChains should only be set once.");
this.processingChains = processingChains;
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
ProcessingChains getProcessingChains() {
return processingChains;
}
public SearchChains getSearchChains() {
if (containerSearch == null)
throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
"': Add <search/> to the cluster in services.xml");
return containerSearch.getChains();
}
public ContainerSearch getSearch() {
return containerSearch;
}
public void setSearch(ContainerSearch containerSearch) {
this.containerSearch = containerSearch;
}
public void setHttp(Http http) {
this.http = http;
addChild(http);
}
public Http getHttp() {
return http;
}
public ContainerDocproc getDocproc() {
return containerDocproc;
}
public void setDocproc(ContainerDocproc containerDocproc) {
this.containerDocproc = containerDocproc;
}
public ContainerDocumentApi getDocumentApi() {
return containerDocumentApi;
}
public void setDocumentApi(ContainerDocumentApi containerDocumentApi) {
this.containerDocumentApi = containerDocumentApi;
}
public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
}
@SuppressWarnings("unchecked")
public Collection<Handler<?>> getHandlers() {
return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class);
}
public void setSecretStore(SecretStore secretStore) {
this.secretStore = secretStore;
}
public Optional<SecretStore> getSecretStore() {
return Optional.ofNullable(secretStore);
}
public Map<ComponentId, Component<?, ?>> getComponentsMap() {
return componentGroup.getComponentMap();
}
/** Returns all components in this cluster (generic, handlers, chained) */
public Collection<Component<?, ?>> getAllComponents() {
List<Component<?, ?>> allComponents = new ArrayList<>();
recursivelyFindAllComponents(allComponents, this);
Collections.sort(allComponents);
return Collections.unmodifiableCollection(allComponents);
}
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
for (AbstractConfigProducer<?> child: current.getChildren().values()) {
if (child instanceof Component)
allComponents.add((Component<?, ?>) child);
if (!(child instanceof Container))
recursivelyFindAllComponents(allComponents, child);
}
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}
@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}
@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
if (containerDocproc != null && containerDocproc.isCompressDocuments())
builder.enablecompression(true);
}
@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
dtb.type(e.getKey());
dtb.factorycomponent(e.getValue());
builder.doctype(dtb);
}
}
@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
Monitoring monitoring = getMonitoringService();
if (monitoring != null) {
builder.snapshot_interval(monitoring.getIntervalSeconds());
}
}
@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
if (applicationMetaData != null) {
builder.name(applicationMetaData.getApplicationId().application().value()).
user(applicationMetaData.getDeployedByUser()).
path(applicationMetaData.getDeployPath()).
timestamp(applicationMetaData.getDeployTimestamp()).
checksum(applicationMetaData.getChecksum()).
generation(applicationMetaData.getGeneration());
}
}
/**
* Adds a bundle present at a known location at the target container nodes.
*
* @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
*/
public final void addPlatformBundle(Path bundlePath) {
platformBundles.add(bundlePath);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
platformBundles.stream() .map(ContainerCluster::toFileReferenceString)
.forEach(builder::bundle);
}
private static String toFileReferenceString(Path path) {
return DISK_BUNDLE_PREFIX + path.toString();
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
builder.jvm
.verbosegc(false)
.availableProcessors(2)
.compressedClassSpaceSize(32)
.minHeapsize(32)
.heapsize(512)
.heapSizeAsPercentageOfPhysicalMemory(0)
.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));
if (environmentVars != null) {
builder.qrs.env(environmentVars);
}
}
@Override
public void getConfig(DocprocConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SchemamappingConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}
public void addDefaultSearchAccessLog() {
addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
List<AbstractSearchCluster> searchClusters = new ArrayList<>();
searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo()));
for (AbstractSearchCluster searchCluster : searchClusters) {
searchCluster.getConfig(builder);
}
}
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
builder.clusterId(name);
builder.nodeCount(containers.size());
for (Service service : getDescendantServices()) {
builder.services.add(new ClusterInfoConfig.Services.Builder()
.index(Integer.parseInt(service.getServicePropertyString("index", "99999")))
.hostname(service.getHostName())
.ports(getPorts(service)));
}
}
/**
* Returns a config server config containing the right zone settings (and defaults for the rest).
* This is useful to allow applications to find out in which zone they are runnung by having the Zone
* object (which is constructed from this config) injected.
*/
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
builder.system(zone.system().value());
builder.environment(zone.environment().value());
builder.region(zone.region().value());
}
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>();
PortsMeta portsMeta = service.getPortsMeta();
for (int i = 0; i < portsMeta.getNumPorts(); i++) {
builders.add(new ClusterInfoConfig.Services.Ports.Builder()
.number(service.getRelativePort(i))
.tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i))
);
}
return builders;
}
public boolean isHostedVespa() {
return isHostedVespa;
}
@Override
public void getConfig(RoutingProviderConfig.Builder builder) {
builder.enabled(isHostedVespa);
}
public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }
/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }
/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }
/**
* Returns the id of the content cluster which hosts this container cluster, if any.
* This is only set with hosted clusters where this container cluster is set up to run on the nodes
* of a content cluster.
*/
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }
public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }
public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }
public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }
boolean rpcServerEnabled() { return rpcServerEnabled; }
boolean httpServerEnabled() { return httpServerEnabled; }
public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }
@Override
public String toString() {
return "container cluster '" + getName() + "'";
}
protected abstract boolean messageBusEnabled();
} | class ContainerCluster<CONTAINER extends Container>
extends AbstractConfigProducer<AbstractConfigProducer<?>>
implements
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
DocumentmanagerConfig.Producer,
ContainerDocumentConfig.Producer,
HealthMonitorConfig.Producer,
ApplicationMetadataConfig.Producer,
BundlesConfig.Producer,
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
SchemamappingConfig.Producer,
QrSearchersConfig.Producer,
QrStartConfig.Producer,
QueryProfilesConfig.Producer,
PageTemplatesConfig.Producer,
SemanticRulesConfig.Producer,
DocprocConfig.Producer,
ClusterInfoConfig.Producer,
RoutingProviderConfig.Producer,
ConfigserverConfig.Producer,
ThreadpoolConfig.Producer
{
/**
* URI prefix used for internal, usually programmatic, APIs. URIs using this
* prefix should never considered available for direct use by customers, and
* normal compatibility concerns only applies to libraries using the URIs in
* question, not contents served from the URIs themselves.
*/
public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use";
public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler";
public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName();
public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName();
public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider";
public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1";
public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15";
public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler";
public static final String STATE_HANDLER_BINDING_1 = "http:
public static final String STATE_HANDLER_BINDING_2 = STATE_HANDLER_BINDING_1 + "/*";
public static final String ROOT_HANDLER_PATH = "/";
public static final String ROOT_HANDLER_BINDING = "http:
public static final String VIP_HANDLER_BINDING = "http:
private final String name;
protected List<CONTAINER> containers = new ArrayList<>();
private Http http;
private ProcessingChains processingChains;
private ContainerSearch containerSearch;
private ContainerDocproc containerDocproc;
private ContainerDocumentApi containerDocumentApi;
private SecretStore secretStore;
private boolean rpcServerEnabled = true;
private boolean httpServerEnabled = true;
private final Set<Path> platformBundles = new LinkedHashSet<>();
private final List<String> serviceAliases = new ArrayList<>();
private final List<String> endpointAliases = new ArrayList<>();
private final ComponentGroup<Component<?, ?>> componentGroup;
private final boolean isHostedVespa;
private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>();
private ApplicationMetaData applicationMetaData = null;
/** The zone this is deployed in, or the default zone if not on hosted Vespa */
private Zone zone;
private String hostClusterId = null;
private String jvmGCOptions = null;
private String environmentVars = null;
public void setZone(Zone zone) {
this.zone = zone;
}
public Zone getZone() {
return zone;
}
public void addDefaultHandlersWithVip() {
addDefaultHandlersExceptStatus();
addVipHandler();
}
public final void addDefaultHandlersExceptStatus() {
addDefaultRootHandler();
addMetricStateHandler();
addApplicationStatusHandler();
}
public void addMetricStateHandler() {
Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>(
new ComponentModel(STATE_HANDLER_CLASS, null, null, null));
stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2);
addComponent(stateHandler);
}
public void addDefaultRootHandler() {
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getFromStrings(
BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null));
handler.addServerBindings(ROOT_HANDLER_BINDING);
addComponent(handler);
}
public void addApplicationStatusHandler() {
Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings(
APPLICATION_STATUS_HANDLER_CLASS, null), null));
statusHandler.addServerBindings("http:
addComponent(statusHandler);
}
public void addVipHandler() {
Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS);
vipHandler.addServerBindings(VIP_HANDLER_BINDING);
addComponent(vipHandler);
}
@SuppressWarnings("deprecation")
private void addJaxProviders() {
addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class);
}
public final void addComponent(Component<?, ?> component) {
componentGroup.addComponent(component);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
/**
* Removes a component by id
*
* @return the removed component, or null if it was not present
*/
@SuppressWarnings("unused")
public Component removeComponent(ComponentId componentId) {
return componentGroup.removeComponent(componentId);
}
private void addSimpleComponent(Class<?> clazz) {
addSimpleComponent(clazz.getName());
}
protected void addSimpleComponent(String className) {
addComponent(new SimpleComponent(className));
}
public void prepare(DeployState deployState) {
applicationMetaData = deployState.getApplicationPackage().getMetaData();
doPrepare(deployState);
}
protected abstract void doPrepare(DeployState deployState);
public String getName() {
return name;
}
public List<CONTAINER> getContainers() {
return Collections.unmodifiableList(containers);
}
public void addContainer(CONTAINER container) {
container.setClusterName(name);
container.setProp("clustername", name)
.setProp("index", this.containers.size());
containers.add(container);
}
public void addContainers(Collection<CONTAINER> containers) {
containers.forEach(this::addContainer);
}
public void setProcessingChains(ProcessingChains processingChains, String... serverBindings) {
if (this.processingChains != null)
throw new IllegalStateException("ProcessingChains should only be set once.");
this.processingChains = processingChains;
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
ProcessingChains getProcessingChains() {
return processingChains;
}
public SearchChains getSearchChains() {
if (containerSearch == null)
throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
"': Add <search/> to the cluster in services.xml");
return containerSearch.getChains();
}
public ContainerSearch getSearch() {
return containerSearch;
}
public void setSearch(ContainerSearch containerSearch) {
this.containerSearch = containerSearch;
}
public void setHttp(Http http) {
this.http = http;
addChild(http);
}
public Http getHttp() {
return http;
}
public ContainerDocproc getDocproc() {
return containerDocproc;
}
public void setDocproc(ContainerDocproc containerDocproc) {
this.containerDocproc = containerDocproc;
}
public ContainerDocumentApi getDocumentApi() {
return containerDocumentApi;
}
public void setDocumentApi(ContainerDocumentApi containerDocumentApi) {
this.containerDocumentApi = containerDocumentApi;
}
public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
}
@SuppressWarnings("unchecked")
public Collection<Handler<?>> getHandlers() {
return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class);
}
public void setSecretStore(SecretStore secretStore) {
this.secretStore = secretStore;
}
public Optional<SecretStore> getSecretStore() {
return Optional.ofNullable(secretStore);
}
public Map<ComponentId, Component<?, ?>> getComponentsMap() {
return componentGroup.getComponentMap();
}
/** Returns all components in this cluster (generic, handlers, chained) */
public Collection<Component<?, ?>> getAllComponents() {
List<Component<?, ?>> allComponents = new ArrayList<>();
recursivelyFindAllComponents(allComponents, this);
Collections.sort(allComponents);
return Collections.unmodifiableCollection(allComponents);
}
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
for (AbstractConfigProducer<?> child: current.getChildren().values()) {
if (child instanceof Component)
allComponents.add((Component<?, ?>) child);
if (!(child instanceof Container))
recursivelyFindAllComponents(allComponents, child);
}
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}
@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}
@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
if (containerDocproc != null && containerDocproc.isCompressDocuments())
builder.enablecompression(true);
}
@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
// Emits one doctype entry per registered concrete document type: type name -> factory component id.
for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
dtb.type(e.getKey());
dtb.factorycomponent(e.getValue());
builder.doctype(dtb);
}
}
@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
// Health snapshot interval follows the cluster's monitoring service, when one is configured.
Monitoring monitoring = getMonitoringService();
if (monitoring != null) {
builder.snapshot_interval(monitoring.getIntervalSeconds());
}
}
@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
// applicationMetaData is only set during prepare(); until then this config stays at defaults.
if (applicationMetaData != null) {
builder.name(applicationMetaData.getApplicationId().application().value()).
user(applicationMetaData.getDeployedByUser()).
path(applicationMetaData.getDeployPath()).
timestamp(applicationMetaData.getDeployTimestamp()).
checksum(applicationMetaData.getChecksum()).
generation(applicationMetaData.getGeneration());
}
}
/**
* Adds a bundle present at a known location at the target container nodes.
*
* @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
*/
public final void addPlatformBundle(Path bundlePath) {
platformBundles.add(bundlePath);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
// Platform bundles are referenced by on-disk location rather than uploaded as file references.
platformBundles.stream() .map(ContainerCluster::toFileReferenceString)
.forEach(builder::bundle);
}
private static String toFileReferenceString(Path path) {
return DISK_BUNDLE_PREFIX + path.toString();
}
// The getConfig overloads below delegate to the search/docproc sub-models when present,
// and otherwise leave the builder at its defaults.
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
// JVM start-up defaults; jvmGCOptions, when set, replaces the default G1 options.
// NOTE(review): the numeric values (heap sizes, processor count) look like baseline defaults
// expected to be overridden elsewhere — confirm before tuning.
builder.jvm
.verbosegc(false)
.availableProcessors(2)
.compressedClassSpaceSize(32)
.minHeapsize(32)
.heapsize(512)
.heapSizeAsPercentageOfPhysicalMemory(0)
.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));
if (environmentVars != null) {
builder.qrs.env(environmentVars);
}
}
@Override
public void getConfig(DocprocConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SchemamappingConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
// Wires the search sub-model to the content clusters once the full model is known.
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}
public void addDefaultSearchAccessLog() {
addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
// Aggregates indexing-language script config from every search cluster in the model.
// Uses the List copy constructor instead of the previous empty-list-then-addAll two-step.
List<AbstractSearchCluster> searchClusters = new ArrayList<>(Content.getSearchClusters(getRoot().configModelRepo()));
for (AbstractSearchCluster searchCluster : searchClusters) {
searchCluster.getConfig(builder);
}
}
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
// Identify this cluster and describe every service running in it.
builder.clusterId(name);
builder.nodeCount(containers.size());
for (Service descendant : getDescendantServices()) {
ClusterInfoConfig.Services.Builder serviceEntry = new ClusterInfoConfig.Services.Builder();
serviceEntry.index(Integer.parseInt(descendant.getServicePropertyString("index", "99999")));
serviceEntry.hostname(descendant.getHostName());
serviceEntry.ports(getPorts(descendant));
builder.services.add(serviceEntry);
}
}
/**
* Returns a config server config containing the right zone settings (and defaults for the rest).
* This allows applications to find out which zone they are running in by having the Zone
* object (constructed from this config) injected.
*/
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
builder.system(zone.system().value());
builder.environment(zone.environment().value());
builder.region(zone.region().value());
}
/** Builds one (number, tags) entry for each port exposed by the given service. */
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
List<ClusterInfoConfig.Services.Ports.Builder> portEntries = new ArrayList<>();
PortsMeta meta = service.getPortsMeta();
for (int portIndex = 0; portIndex < meta.getNumPorts(); portIndex++) {
ClusterInfoConfig.Services.Ports.Builder portEntry = new ClusterInfoConfig.Services.Ports.Builder();
portEntry.number(service.getRelativePort(portIndex));
portEntry.tags(ApplicationConfigProducerRoot.getPortTags(meta, portIndex));
portEntries.add(portEntry);
}
return portEntries;
}
public boolean isHostedVespa() {
return isHostedVespa;
}
@Override
public void getConfig(RoutingProviderConfig.Builder builder) {
// The routing provider is only enabled when running in hosted Vespa.
builder.enabled(isHostedVespa);
}
// NOTE(review): returns the live, mutable map — callers can modify cluster state through it.
public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }
/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }
/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }
/**
* Returns the id of the content cluster which hosts this container cluster, if any.
* This is only set with hosted clusters where this container cluster is set up to run on the nodes
* of a content cluster.
*/
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }
public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }
public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }
public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }
boolean rpcServerEnabled() { return rpcServerEnabled; }
boolean httpServerEnabled() { return httpServerEnabled; }
public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }
@Override
public String toString() {
return "container cluster '" + getName() + "'";
}
// Whether containers in this cluster get the message bus; decided by concrete subclasses.
protected abstract boolean messageBusEnabled();
} |
Done - thanks! | public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId);
this.name = name;
this.isHostedVespa = stateIsHosted(deployState);
this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone();
componentGroup = new ComponentGroup<>(this, "component");
addComponent(new StatisticsComponent());
addSimpleComponent(AccessLog.class);
addSimpleComponent(ThreadPoolProvider.class);
addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class);
addSimpleComponent(SecurityFilterInvoker.class);
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater");
addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class);
addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor");
addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory");
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent("ai.vespa.cloud.SystemInfo");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addJaxProviders();
} | addSimpleComponent("ai.vespa.cloud.SystemInfo"); | public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId);
this.name = name;
this.isHostedVespa = stateIsHosted(deployState);
this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone();
componentGroup = new ComponentGroup<>(this, "component");
addComponent(new StatisticsComponent());
addSimpleComponent(AccessLog.class);
addSimpleComponent(ThreadPoolProvider.class);
addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class);
addSimpleComponent(SecurityFilterInvoker.class);
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider");
addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater");
addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class);
addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME);
addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor");
addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory");
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addJaxProviders();
} | class ContainerCluster<CONTAINER extends Container>
extends AbstractConfigProducer<AbstractConfigProducer<?>>
implements
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
DocumentmanagerConfig.Producer,
ContainerDocumentConfig.Producer,
HealthMonitorConfig.Producer,
ApplicationMetadataConfig.Producer,
BundlesConfig.Producer,
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
SchemamappingConfig.Producer,
QrSearchersConfig.Producer,
QrStartConfig.Producer,
QueryProfilesConfig.Producer,
PageTemplatesConfig.Producer,
SemanticRulesConfig.Producer,
DocprocConfig.Producer,
ClusterInfoConfig.Producer,
RoutingProviderConfig.Producer,
ConfigserverConfig.Producer,
ThreadpoolConfig.Producer
{
/**
* URI prefix used for internal, usually programmatic, APIs. URIs using this
* prefix should never considered available for direct use by customers, and
* normal compatibility concerns only applies to libraries using the URIs in
* question, not contents served from the URIs themselves.
*/
public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use";
public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler";
public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName();
public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName();
public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider";
public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1";
public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15";
public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler";
public static final String STATE_HANDLER_BINDING_1 = "http:
public static final String STATE_HANDLER_BINDING_2 = STATE_HANDLER_BINDING_1 + "/*";
public static final String ROOT_HANDLER_PATH = "/";
public static final String ROOT_HANDLER_BINDING = "http:
public static final String VIP_HANDLER_BINDING = "http:
private final String name;
protected List<CONTAINER> containers = new ArrayList<>();
private Http http;
private ProcessingChains processingChains;
private ContainerSearch containerSearch;
private ContainerDocproc containerDocproc;
private ContainerDocumentApi containerDocumentApi;
private SecretStore secretStore;
private boolean rpcServerEnabled = true;
private boolean httpServerEnabled = true;
private final Set<Path> platformBundles = new LinkedHashSet<>();
private final List<String> serviceAliases = new ArrayList<>();
private final List<String> endpointAliases = new ArrayList<>();
private final ComponentGroup<Component<?, ?>> componentGroup;
private final boolean isHostedVespa;
private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>();
private ApplicationMetaData applicationMetaData = null;
/** The zone this is deployed in, or the default zone if not on hosted Vespa */
private Zone zone;
private String hostClusterId = null;
private String jvmGCOptions = null;
private String environmentVars = null;
public void setZone(Zone zone) {
this.zone = zone;
}
public Zone getZone() {
return zone;
}
public void addDefaultHandlersWithVip() {
addDefaultHandlersExceptStatus();
addVipHandler();
}
public final void addDefaultHandlersExceptStatus() {
addDefaultRootHandler();
addMetricStateHandler();
addApplicationStatusHandler();
}
public void addMetricStateHandler() {
Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>(
new ComponentModel(STATE_HANDLER_CLASS, null, null, null));
stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2);
addComponent(stateHandler);
}
public void addDefaultRootHandler() {
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getFromStrings(
BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null));
handler.addServerBindings(ROOT_HANDLER_BINDING);
addComponent(handler);
}
public void addApplicationStatusHandler() {
Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings(
APPLICATION_STATUS_HANDLER_CLASS, null), null));
statusHandler.addServerBindings("http:
addComponent(statusHandler);
}
public void addVipHandler() {
Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS);
vipHandler.addServerBindings(VIP_HANDLER_BINDING);
addComponent(vipHandler);
}
@SuppressWarnings("deprecation")
private void addJaxProviders() {
addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class);
}
public final void addComponent(Component<?, ?> component) {
componentGroup.addComponent(component);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
/**
* Removes a component by id
*
* @return the removed component, or null if it was not present
*/
@SuppressWarnings("unused")
public Component removeComponent(ComponentId componentId) {
return componentGroup.removeComponent(componentId);
}
private void addSimpleComponent(Class<?> clazz) {
addSimpleComponent(clazz.getName());
}
protected void addSimpleComponent(String className) {
addComponent(new SimpleComponent(className));
}
// Captures the application package metadata, then delegates to the subclass hook.
public void prepare(DeployState deployState) {
applicationMetaData = deployState.getApplicationPackage().getMetaData();
doPrepare(deployState);
}
// Subclass-specific preparation, called from prepare().
protected abstract void doPrepare(DeployState deployState);
public String getName() {
return name;
}
public List<CONTAINER> getContainers() {
return Collections.unmodifiableList(containers);
}
// Adds a container and stamps it with this cluster's name and its position as service properties.
public void addContainer(CONTAINER container) {
container.setClusterName(name);
container.setProp("clustername", name)
.setProp("index", this.containers.size());
containers.add(container);
}
public void addContainers(Collection<CONTAINER> containers) {
containers.forEach(this::addContainer);
}
// Installs the processing chains and a ProcessingHandler bound to the given server bindings.
// May only be called once per cluster.
public void setProcessingChains(ProcessingChains processingChains, String... serverBindings) {
if (this.processingChains != null)
throw new IllegalStateException("ProcessingChains should only be set once.");
this.processingChains = processingChains;
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
ProcessingChains getProcessingChains() {
return processingChains;
}
// Fails fast with a services.xml hint when no <search/> element was configured.
public SearchChains getSearchChains() {
if (containerSearch == null)
throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
"': Add <search/> to the cluster in services.xml");
return containerSearch.getChains();
}
public ContainerSearch getSearch() {
return containerSearch;
}
public void setSearch(ContainerSearch containerSearch) {
this.containerSearch = containerSearch;
}
// Registers the http sub-model both as the cluster's http config and as a config producer child.
public void setHttp(Http http) {
this.http = http;
addChild(http);
}
public Http getHttp() {
return http;
}
public ContainerDocproc getDocproc() {
return containerDocproc;
}
public void setDocproc(ContainerDocproc containerDocproc) {
this.containerDocproc = containerDocproc;
}
public ContainerDocumentApi getDocumentApi() {
return containerDocumentApi;
}
public void setDocumentApi(ContainerDocumentApi containerDocumentApi) {
this.containerDocumentApi = containerDocumentApi;
}
// Fails fast with a services.xml hint when no <document-processing/> element was configured.
public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
}
@SuppressWarnings("unchecked")
public Collection<Handler<?>> getHandlers() {
return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class);
}
public void setSecretStore(SecretStore secretStore) {
this.secretStore = secretStore;
}
public Optional<SecretStore> getSecretStore() {
return Optional.ofNullable(secretStore);
}
public Map<ComponentId, Component<?, ?>> getComponentsMap() {
return componentGroup.getComponentMap();
}
/** Returns all components in this cluster (generic, handlers, chained) */
public Collection<Component<?, ?>> getAllComponents() {
List<Component<?, ?>> allComponents = new ArrayList<>();
recursivelyFindAllComponents(allComponents, this);
Collections.sort(allComponents);
return Collections.unmodifiableCollection(allComponents);
}
// Depth-first walk of the config producer tree; does not descend into Container children.
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
for (AbstractConfigProducer<?> child: current.getChildren().values()) {
if (child instanceof Component)
allComponents.add((Component<?, ?>) child);
if (!(child instanceof Container))
recursivelyFindAllComponents(allComponents, child);
}
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
// NOTE(review): the RegistriesHack component is always appended here — presumably needed
// for DI bootstrap; confirm before removing.
builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}
@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}
@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
// Document compression is only enabled when docproc explicitly requests it.
if (containerDocproc != null && containerDocproc.isCompressDocuments())
builder.enablecompression(true);
}
@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
// Emits one doctype entry per registered concrete document type: type name -> factory component id.
for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
dtb.type(e.getKey());
dtb.factorycomponent(e.getValue());
builder.doctype(dtb);
}
}
@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
// Health snapshot interval follows the cluster's monitoring service, when one is configured.
Monitoring monitoring = getMonitoringService();
if (monitoring != null) {
builder.snapshot_interval(monitoring.getIntervalSeconds());
}
}
@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
// applicationMetaData is only set during prepare(); until then this config stays at defaults.
if (applicationMetaData != null) {
builder.name(applicationMetaData.getApplicationId().application().value()).
user(applicationMetaData.getDeployedByUser()).
path(applicationMetaData.getDeployPath()).
timestamp(applicationMetaData.getDeployTimestamp()).
checksum(applicationMetaData.getChecksum()).
generation(applicationMetaData.getGeneration());
}
}
/**
* Adds a bundle present at a known location at the target container nodes.
*
* @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
*/
public final void addPlatformBundle(Path bundlePath) {
platformBundles.add(bundlePath);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
// Platform bundles are referenced by on-disk location rather than uploaded as file references.
platformBundles.stream() .map(ContainerCluster::toFileReferenceString)
.forEach(builder::bundle);
}
private static String toFileReferenceString(Path path) {
return DISK_BUNDLE_PREFIX + path.toString();
}
// The getConfig overloads below delegate to the search/docproc sub-models when present,
// and otherwise leave the builder at its defaults.
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
// JVM start-up defaults; jvmGCOptions, when set, replaces the default G1 options.
builder.jvm
.verbosegc(false)
.availableProcessors(2)
.compressedClassSpaceSize(32)
.minHeapsize(32)
.heapsize(512)
.heapSizeAsPercentageOfPhysicalMemory(0)
.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));
if (environmentVars != null) {
builder.qrs.env(environmentVars);
}
}
@Override
public void getConfig(DocprocConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
@Override
public void getConfig(SchemamappingConfig.Builder builder) {
if (containerDocproc != null) containerDocproc.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
if (containerSearch != null) containerSearch.getConfig(builder);
}
// Wires the search sub-model to the content clusters once the full model is known.
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}
public void addDefaultSearchAccessLog() {
addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
// Aggregates indexing-language script config from every search cluster in the model.
// Uses the List copy constructor instead of the previous empty-list-then-addAll two-step.
List<AbstractSearchCluster> searchClusters = new ArrayList<>(Content.getSearchClusters(getRoot().configModelRepo()));
for (AbstractSearchCluster searchCluster : searchClusters) {
searchCluster.getConfig(builder);
}
}
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
// Identifies this cluster and describes every service running in it.
builder.clusterId(name);
builder.nodeCount(containers.size());
for (Service service : getDescendantServices()) {
builder.services.add(new ClusterInfoConfig.Services.Builder()
.index(Integer.parseInt(service.getServicePropertyString("index", "99999")))
.hostname(service.getHostName())
.ports(getPorts(service)));
}
}
/**
* Returns a config server config containing the right zone settings (and defaults for the rest).
* This is useful to allow applications to find out in which zone they are running by having the Zone
* object (which is constructed from this config) injected.
*/
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
builder.system(zone.system().value());
builder.environment(zone.environment().value());
builder.region(zone.region().value());
}
// Builds one (number, tags) entry per port exposed by the given service.
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>();
PortsMeta portsMeta = service.getPortsMeta();
for (int i = 0; i < portsMeta.getNumPorts(); i++) {
builders.add(new ClusterInfoConfig.Services.Ports.Builder()
.number(service.getRelativePort(i))
.tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i))
);
}
return builders;
}
public boolean isHostedVespa() {
return isHostedVespa;
}
@Override
public void getConfig(RoutingProviderConfig.Builder builder) {
// The routing provider is only enabled when running in hosted Vespa.
builder.enabled(isHostedVespa);
}
// NOTE(review): returns the live, mutable map — callers can modify cluster state through it.
public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }
/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }
/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }
/**
* Returns the id of the content cluster which hosts this container cluster, if any.
* This is only set with hosted clusters where this container cluster is set up to run on the nodes
* of a content cluster.
*/
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }
public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }
public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }
public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }
boolean rpcServerEnabled() { return rpcServerEnabled; }
boolean httpServerEnabled() { return httpServerEnabled; }
public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }
@Override
public String toString() {
return "container cluster '" + getName() + "'";
}
// Whether containers in this cluster get the message bus; decided by concrete subclasses.
protected abstract boolean messageBusEnabled();
}
extends AbstractConfigProducer<AbstractConfigProducer<?>>
implements
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
DocumentmanagerConfig.Producer,
ContainerDocumentConfig.Producer,
HealthMonitorConfig.Producer,
ApplicationMetadataConfig.Producer,
BundlesConfig.Producer,
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
SchemamappingConfig.Producer,
QrSearchersConfig.Producer,
QrStartConfig.Producer,
QueryProfilesConfig.Producer,
PageTemplatesConfig.Producer,
SemanticRulesConfig.Producer,
DocprocConfig.Producer,
ClusterInfoConfig.Producer,
RoutingProviderConfig.Producer,
ConfigserverConfig.Producer,
ThreadpoolConfig.Producer
{
/**
* URI prefix used for internal, usually programmatic, APIs. URIs using this
* prefix should never considered available for direct use by customers, and
* normal compatibility concerns only applies to libraries using the URIs in
* question, not contents served from the URIs themselves.
*/
public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use";
public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler";
public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName();
public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName();
public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider";
public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1";
public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15";
public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler";
public static final String STATE_HANDLER_BINDING_1 = "http:
public static final String STATE_HANDLER_BINDING_2 = STATE_HANDLER_BINDING_1 + "/*";
public static final String ROOT_HANDLER_PATH = "/";
public static final String ROOT_HANDLER_BINDING = "http:
public static final String VIP_HANDLER_BINDING = "http:
private final String name;
protected List<CONTAINER> containers = new ArrayList<>();
private Http http;
private ProcessingChains processingChains;
private ContainerSearch containerSearch;
private ContainerDocproc containerDocproc;
private ContainerDocumentApi containerDocumentApi;
private SecretStore secretStore;
private boolean rpcServerEnabled = true;
private boolean httpServerEnabled = true;
private final Set<Path> platformBundles = new LinkedHashSet<>();
private final List<String> serviceAliases = new ArrayList<>();
private final List<String> endpointAliases = new ArrayList<>();
private final ComponentGroup<Component<?, ?>> componentGroup;
private final boolean isHostedVespa;
private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>();
private ApplicationMetaData applicationMetaData = null;
/** The zone this is deployed in, or the default zone if not on hosted Vespa */
private Zone zone;
private String hostClusterId = null;
private String jvmGCOptions = null;
private String environmentVars = null;
public void setZone(Zone zone) {
this.zone = zone;
}
public Zone getZone() {
return zone;
}
public void addDefaultHandlersWithVip() {
addDefaultHandlersExceptStatus();
addVipHandler();
}
public final void addDefaultHandlersExceptStatus() {
addDefaultRootHandler();
addMetricStateHandler();
addApplicationStatusHandler();
}
public void addMetricStateHandler() {
Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>(
new ComponentModel(STATE_HANDLER_CLASS, null, null, null));
stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2);
addComponent(stateHandler);
}
public void addDefaultRootHandler() {
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getFromStrings(
BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null));
handler.addServerBindings(ROOT_HANDLER_BINDING);
addComponent(handler);
}
public void addApplicationStatusHandler() {
Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>(
new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings(
APPLICATION_STATUS_HANDLER_CLASS, null), null));
statusHandler.addServerBindings("http:
addComponent(statusHandler);
}
public void addVipHandler() {
Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS);
vipHandler.addServerBindings(VIP_HANDLER_BINDING);
addComponent(vipHandler);
}
@SuppressWarnings("deprecation")
private void addJaxProviders() {
addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class);
addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class);
}
public final void addComponent(Component<?, ?> component) {
componentGroup.addComponent(component);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
/**
* Removes a component by id
*
* @return the removed component, or null if it was not present
*/
@SuppressWarnings("unused")
public Component removeComponent(ComponentId componentId) {
return componentGroup.removeComponent(componentId);
}
private void addSimpleComponent(Class<?> clazz) {
addSimpleComponent(clazz.getName());
}
protected void addSimpleComponent(String className) {
addComponent(new SimpleComponent(className));
}
public void prepare(DeployState deployState) {
applicationMetaData = deployState.getApplicationPackage().getMetaData();
doPrepare(deployState);
}
protected abstract void doPrepare(DeployState deployState);
public String getName() {
return name;
}
public List<CONTAINER> getContainers() {
return Collections.unmodifiableList(containers);
}
public void addContainer(CONTAINER container) {
container.setClusterName(name);
container.setProp("clustername", name)
.setProp("index", this.containers.size());
containers.add(container);
}
public void addContainers(Collection<CONTAINER> containers) {
containers.forEach(this::addContainer);
}
public void setProcessingChains(ProcessingChains processingChains, String... serverBindings) {
if (this.processingChains != null)
throw new IllegalStateException("ProcessingChains should only be set once.");
this.processingChains = processingChains;
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
ProcessingChains getProcessingChains() {
return processingChains;
}
/**
 * Returns the search chains of this cluster.
 *
 * @throws IllegalStateException if no search components are configured
 */
public SearchChains getSearchChains() {
    if (containerSearch == null)
        throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
                                        "': Add <search/> to the cluster in services.xml");
    return containerSearch.getChains();
}

/** Returns the search component of this cluster, or null if none is set. */
public ContainerSearch getSearch() {
    return containerSearch;
}

public void setSearch(ContainerSearch containerSearch) {
    this.containerSearch = containerSearch;
}

/** Sets the http model of this cluster and registers it as a child config producer. */
public void setHttp(Http http) {
    this.http = http;
    addChild(http);
}

public Http getHttp() {
    return http;
}

public ContainerDocproc getDocproc() {
    return containerDocproc;
}

public void setDocproc(ContainerDocproc containerDocproc) {
    this.containerDocproc = containerDocproc;
}

public ContainerDocumentApi getDocumentApi() {
    return containerDocumentApi;
}

public void setDocumentApi(ContainerDocumentApi containerDocumentApi) {
    this.containerDocumentApi = containerDocumentApi;
}

/**
 * Returns the document processing chains of this cluster.
 *
 * @throws IllegalStateException if no document processing components are configured
 */
public DocprocChains getDocprocChains() {
    if (containerDocproc == null)
        throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
                                        "': Add <document-processing/> to the cluster in services.xml");
    return containerDocproc.getChains();
}

// The double cast through the raw Collection type narrows the component collection to handlers
@SuppressWarnings("unchecked")
public Collection<Handler<?>> getHandlers() {
    return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class);
}

public void setSecretStore(SecretStore secretStore) {
    this.secretStore = secretStore;
}

public Optional<SecretStore> getSecretStore() {
    return Optional.ofNullable(secretStore);
}

public Map<ComponentId, Component<?, ?>> getComponentsMap() {
    return componentGroup.getComponentMap();
}

/** Returns all components in this cluster (generic, handlers, chained), in sorted order */
public Collection<Component<?, ?>> getAllComponents() {
    List<Component<?, ?>> allComponents = new ArrayList<>();
    recursivelyFindAllComponents(allComponents, this);
    // Sorting gives a stable, deterministic order of the returned collection
    Collections.sort(allComponents);
    return Collections.unmodifiableCollection(allComponents);
}

// Walks the config producer tree collecting Component children. Does not descend into
// Container children (presumably container-local components are handled separately — TODO confirm).
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
    for (AbstractConfigProducer<?> child: current.getChildren().values()) {
        if (child instanceof Component)
            allComponents.add((Component<?, ?>) child);

        if (!(child instanceof Container))
            recursivelyFindAllComponents(allComponents, child);
    }
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
    builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
    builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}

@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
    builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}

@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
    // Only enable compression when docproc is present and configured to compress documents
    if (containerDocproc != null && containerDocproc.isCompressDocuments())
        builder.enablecompression(true);
}

@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
    // One doctype entry per concrete document type -> factory component mapping
    for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
        ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
        dtb.type(e.getKey());
        dtb.factorycomponent(e.getValue());
        builder.doctype(dtb);
    }
}

@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
    Monitoring monitoring = getMonitoringService();
    if (monitoring != null) {
        builder.snapshot_interval(monitoring.getIntervalSeconds());
    }
}

@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
    // applicationMetaData is only set by prepare(); leave defaults until then
    if (applicationMetaData != null) {
        builder.name(applicationMetaData.getApplicationId().application().value()).
                user(applicationMetaData.getDeployedByUser()).
                path(applicationMetaData.getDeployPath()).
                timestamp(applicationMetaData.getDeployTimestamp()).
                checksum(applicationMetaData.getChecksum()).
                generation(applicationMetaData.getGeneration());
    }
}

/**
 * Adds a bundle present at a known location at the target container nodes.
 *
 * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
 */
public final void addPlatformBundle(Path bundlePath) {
    platformBundles.add(bundlePath);
}

@Override
public void getConfig(BundlesConfig.Builder builder) {
    platformBundles.stream().map(ContainerCluster::toFileReferenceString)
                   .forEach(builder::bundle);
}

// The prefix presumably marks the reference as a file on disk rather than a
// distributed file reference — TODO confirm against BundlesConfig consumers.
private static String toFileReferenceString(Path path) {
    return DISK_BUNDLE_PREFIX + path.toString();
}
// The getConfig overloads below delegate to the search/docproc models when present,
// leaving the builder untouched otherwise.
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(QrStartConfig.Builder builder) {
    builder.jvm
            .verbosegc(false)
            .availableProcessors(2)
            .compressedClassSpaceSize(32)
            .minHeapsize(32)
            .heapsize(512)
            .heapSizeAsPercentageOfPhysicalMemory(0)
            .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));  // fall back to G1 when no explicit GC options are set
    if (environmentVars != null) {
        builder.qrs.env(environmentVars);
    }
}

@Override
public void getConfig(DocprocConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SchemamappingConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(IndexInfoConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

/** Connects the search component (if any) to the given search clusters. */
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
    if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}

/** Adds a JSON access log component named after this cluster. */
public void addDefaultSearchAccessLog() {
    addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
    // Let every content search cluster contribute its indexing scripts.
    // (Iterate the returned collection directly — no need to copy it into an
    // intermediate ArrayList first, as the old code did.)
    for (AbstractSearchCluster searchCluster : Content.getSearchClusters(getRoot().configModelRepo()))
        searchCluster.getConfig(builder);
}
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
    builder.clusterId(name);
    builder.nodeCount(containers.size());

    for (Service service : getDescendantServices()) {
        builder.services.add(new ClusterInfoConfig.Services.Builder()
                .index(Integer.parseInt(service.getServicePropertyString("index", "99999")))  // "99999" is the default when no index property is set
                .hostname(service.getHostName())
                .ports(getPorts(service)));
    }
}

/**
 * Returns a config server config containing the right zone settings (and defaults for the rest).
 * This is useful to allow applications to find out in which zone they are running by having the Zone
 * object (which is constructed from this config) injected.
 */
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
    builder.system(zone.system().value());
    builder.environment(zone.environment().value());
    builder.region(zone.region().value());
}

/** Builds one entry (port number + tags) per port of the given service. */
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
    List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>();
    PortsMeta portsMeta = service.getPortsMeta();
    for (int i = 0; i < portsMeta.getNumPorts(); i++) {
        builders.add(new ClusterInfoConfig.Services.Ports.Builder()
                .number(service.getRelativePort(i))
                .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i))
        );
    }
    return builders;
}

public boolean isHostedVespa() {
    return isHostedVespa;
}

@Override
public void getConfig(RoutingProviderConfig.Builder builder) {
    builder.enabled(isHostedVespa);
}

public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }

/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }

/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }

public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }

/**
 * Returns the id of the content cluster which hosts this container cluster, if any.
 * This is only set with hosted clusters where this container cluster is set up to run on the nodes
 * of a content cluster.
 */
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }

public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }
public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }

public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }
boolean rpcServerEnabled() { return rpcServerEnabled; }
boolean httpServerEnabled() { return httpServerEnabled; }
public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }

@Override
public String toString() {
    return "container cluster '" + getName() + "'";
}

/** Whether the message bus is enabled for this cluster; decided by subclasses. */
protected abstract boolean messageBusEnabled();
} |
The `null` behaviors in this class are all over the place. `endpointCertificateMetadata(String)`, `containerEndpoints(String)` ignores it, as does `athenzDomain(AthenzDomain)`, while `athenzDomain(String)`, and `tlsSecretsKeyName(String)` (correctly, IMO) resets the state. | public Builder dockerImageRepository(String dockerImageRepository) {
if (dockerImageRepository == null) return this;
this.dockerImageRepository = Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
} | if (dockerImageRepository == null) return this; | public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
} | class Builder {
// Defaults applied when the corresponding setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private ApplicationId applicationId = ApplicationId.defaultId();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(30));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = List.of();
private Optional<String> tlsSecretsKeyName = Optional.empty();
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) {
    this.applicationId = applicationId;
    return this;
}

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
    this.ignoreValidationErrors = ignoreValidationErrors;
    return this;
}

public Builder dryRun(boolean dryRun) {
    this.dryRun = dryRun;
    return this;
}

public Builder verbose(boolean verbose) {
    this.verbose = verbose;
    return this;
}

public Builder isBootstrap(boolean isBootstrap) {
    this.isBootstrap = isBootstrap;
    return this;
}

public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
    this.timeoutBudget = timeoutBudget;
    return this;
}
/**
 * Sets the Vespa version from its string form.
 * A null or empty argument resets the version to "unspecified".
 */
public Builder vespaVersion(String vespaVersion) {
    // Optional chain instead of a mutable local + branch, mirroring tlsSecretsKeyName
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(v -> ! v.isEmpty())
                                .map(Version::fromString);
    return this;
}
/** Sets the Vespa version; null unsets it. */
public Builder vespaVersion(Version vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion);
    return this;
}
/**
 * Sets the container endpoints from their JSON serialized form.
 * A null argument resets the endpoints to the default (empty) list instead of
 * being silently ignored, so null handling is consistent across this builder.
 */
public Builder containerEndpoints(String serialized) {
    this.containerEndpoints = (serialized == null)
            ? List.of()
            : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}
/** Sets the TLS secrets key name; null or empty unsets it. */
public Builder tlsSecretsKeyName(String tlsSecretsKeyName) {
    this.tlsSecretsKeyName = Optional.ofNullable(tlsSecretsKeyName)
                                     .filter(s -> ! s.isEmpty());
    return this;
}
/**
 * Sets the endpoint certificate metadata from its JSON serialized form.
 * A null argument resets the metadata instead of being silently ignored,
 * so null handling is consistent across this builder.
 */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = (serialized == null)
            ? Optional.empty()
            : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}
/**
 * Sets the docker image repository; null unsets it.
 * (Previously a null argument was silently ignored, keeping any earlier value —
 * inconsistent with the other setters of this builder.)
 */
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
    return this;
}
/** Sets the Athenz domain from its string form; null unsets it. */
public Builder athenzDomain(String athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
    return this;
}
/**
 * Sets the Athenz domain; null unsets it, consistently with {@link #athenzDomain(String)}.
 * (Previously Optional.of threw NullPointerException on a null argument.)
 */
public Builder athenzDomain(AthenzDomain athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain);
    return this;
}
/** Creates a PrepareParams from the accumulated state of this builder. */
public PrepareParams build() {
    return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints, tlsSecretsKeyName,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain);
}
} | class Builder {
// Defaults applied when the corresponding setter is never called, or called with null.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private ApplicationId applicationId = ApplicationId.defaultId();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(30));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = List.of();
private Optional<String> tlsSecretsKeyName = Optional.empty();
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) {
    this.applicationId = applicationId;
    return this;
}

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
    this.ignoreValidationErrors = ignoreValidationErrors;
    return this;
}

public Builder dryRun(boolean dryRun) {
    this.dryRun = dryRun;
    return this;
}

public Builder verbose(boolean verbose) {
    this.verbose = verbose;
    return this;
}

public Builder isBootstrap(boolean isBootstrap) {
    this.isBootstrap = isBootstrap;
    return this;
}

public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
    this.timeoutBudget = timeoutBudget;
    return this;
}

/** Sets the Vespa version from its string form; null or empty unsets it. */
public Builder vespaVersion(String vespaVersion) {
    Optional<Version> version = Optional.empty();
    if (vespaVersion != null && !vespaVersion.isEmpty()) {
        version = Optional.of(Version.fromString(vespaVersion));
    }
    this.vespaVersion = version;
    return this;
}

/** Sets the Vespa version; null unsets it. */
public Builder vespaVersion(Version vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion);
    return this;
}

/** Sets the container endpoints from JSON; null resets them to the empty default. */
public Builder containerEndpoints(String serialized) {
    this.containerEndpoints = (serialized == null)
            ? List.of()
            : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

/** Sets the TLS secrets key name; null or empty unsets it. */
public Builder tlsSecretsKeyName(String tlsSecretsKeyName) {
    this.tlsSecretsKeyName = Optional.ofNullable(tlsSecretsKeyName)
                                     .filter(s -> ! s.isEmpty());
    return this;
}

/** Sets the endpoint certificate metadata from JSON; null unsets it. */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = (serialized == null)
            ? Optional.empty()
            : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}

/** Sets the docker image repository; null unsets it. */
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
    return this;
}

/** Sets the Athenz domain from its string form; null unsets it. */
public Builder athenzDomain(String athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
    return this;
}

/** Sets the Athenz domain; null unsets it. */
public Builder athenzDomain(AthenzDomain athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain);
    return this;
}

/** Creates a PrepareParams from the accumulated state of this builder. */
public PrepareParams build() {
    return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints, tlsSecretsKeyName,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain);
}
} |
Yes, good point, I'll try to clean it up | public Builder dockerImageRepository(String dockerImageRepository) {
if (dockerImageRepository == null) return this;
this.dockerImageRepository = Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
} | if (dockerImageRepository == null) return this; | public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
} | class Builder {
// Defaults applied when the corresponding setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private ApplicationId applicationId = ApplicationId.defaultId();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(30));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = List.of();
private Optional<String> tlsSecretsKeyName = Optional.empty();
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) {
    this.applicationId = applicationId;
    return this;
}

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
    this.ignoreValidationErrors = ignoreValidationErrors;
    return this;
}

public Builder dryRun(boolean dryRun) {
    this.dryRun = dryRun;
    return this;
}

public Builder verbose(boolean verbose) {
    this.verbose = verbose;
    return this;
}

public Builder isBootstrap(boolean isBootstrap) {
    this.isBootstrap = isBootstrap;
    return this;
}

public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
    this.timeoutBudget = timeoutBudget;
    return this;
}
/**
 * Sets the Vespa version from its string form.
 * A null or empty argument resets the version to "unspecified".
 */
public Builder vespaVersion(String vespaVersion) {
    // Optional chain instead of a mutable local + branch, mirroring tlsSecretsKeyName
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(v -> ! v.isEmpty())
                                .map(Version::fromString);
    return this;
}
/** Sets the Vespa version; null unsets it. */
public Builder vespaVersion(Version vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion);
    return this;
}
/**
 * Sets the container endpoints from their JSON serialized form.
 * A null argument resets the endpoints to the default (empty) list instead of
 * being silently ignored, so null handling is consistent across this builder.
 */
public Builder containerEndpoints(String serialized) {
    this.containerEndpoints = (serialized == null)
            ? List.of()
            : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}
/** Sets the TLS secrets key name; null or empty unsets it. */
public Builder tlsSecretsKeyName(String tlsSecretsKeyName) {
    this.tlsSecretsKeyName = Optional.ofNullable(tlsSecretsKeyName)
                                     .filter(s -> ! s.isEmpty());
    return this;
}
/**
 * Sets the endpoint certificate metadata from its JSON serialized form.
 * A null argument resets the metadata instead of being silently ignored,
 * so null handling is consistent across this builder.
 */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = (serialized == null)
            ? Optional.empty()
            : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}
/**
 * Sets the docker image repository; null unsets it.
 * (Previously a null argument was silently ignored, keeping any earlier value —
 * inconsistent with the other setters of this builder.)
 */
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
    return this;
}
/** Sets the Athenz domain from its string form; null unsets it. */
public Builder athenzDomain(String athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
    return this;
}
/**
 * Sets the Athenz domain; null unsets it, consistently with {@link #athenzDomain(String)}.
 * (Previously Optional.of threw NullPointerException on a null argument.)
 */
public Builder athenzDomain(AthenzDomain athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain);
    return this;
}
/** Creates a PrepareParams from the accumulated state of this builder. */
public PrepareParams build() {
    return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints, tlsSecretsKeyName,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain);
}
} | class Builder {
// Defaults applied when the corresponding setter is never called, or called with null.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private ApplicationId applicationId = ApplicationId.defaultId();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(30));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = List.of();
private Optional<String> tlsSecretsKeyName = Optional.empty();
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) {
    this.applicationId = applicationId;
    return this;
}

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
    this.ignoreValidationErrors = ignoreValidationErrors;
    return this;
}

public Builder dryRun(boolean dryRun) {
    this.dryRun = dryRun;
    return this;
}

public Builder verbose(boolean verbose) {
    this.verbose = verbose;
    return this;
}

public Builder isBootstrap(boolean isBootstrap) {
    this.isBootstrap = isBootstrap;
    return this;
}

public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
    this.timeoutBudget = timeoutBudget;
    return this;
}

/** Sets the Vespa version from its string form; null or empty unsets it. */
public Builder vespaVersion(String vespaVersion) {
    Optional<Version> version = Optional.empty();
    if (vespaVersion != null && !vespaVersion.isEmpty()) {
        version = Optional.of(Version.fromString(vespaVersion));
    }
    this.vespaVersion = version;
    return this;
}

/** Sets the Vespa version; null unsets it. */
public Builder vespaVersion(Version vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion);
    return this;
}

/** Sets the container endpoints from JSON; null resets them to the empty default. */
public Builder containerEndpoints(String serialized) {
    this.containerEndpoints = (serialized == null)
            ? List.of()
            : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

/** Sets the TLS secrets key name; null or empty unsets it. */
public Builder tlsSecretsKeyName(String tlsSecretsKeyName) {
    this.tlsSecretsKeyName = Optional.ofNullable(tlsSecretsKeyName)
                                     .filter(s -> ! s.isEmpty());
    return this;
}

/** Sets the endpoint certificate metadata from JSON; null unsets it. */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = (serialized == null)
            ? Optional.empty()
            : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}

/** Sets the docker image repository; null unsets it. */
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
    return this;
}

/** Sets the Athenz domain from its string form; null unsets it. */
public Builder athenzDomain(String athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
    return this;
}

/** Sets the Athenz domain; null unsets it. */
public Builder athenzDomain(AthenzDomain athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain);
    return this;
}

/** Creates a PrepareParams from the accumulated state of this builder. */
public PrepareParams build() {
    return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints, tlsSecretsKeyName,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain);
}
} |
Is this safe? Looks like this is called during deployment and deciding the targeted resources. If this is not a new deployment that has run for some time, then it probably has target resources. If the user then changes the limits so that the target is outside, this will throw? | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | cluster = cluster.withLimits(min, max); | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | class Application {
private final ApplicationId id;

// Cluster state keyed by cluster id. Instances are immutable; mutators return copies.
private final Map<ClusterSpec.Id, Cluster> clusters;

public Application(ApplicationId id) {
    this(id, Map.of());
}

public Application(ApplicationId id, Collection<Cluster> clusters) {
    this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}

private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
    this.id = id;
    this.clusters = clusters;
}

public ApplicationId id() { return id; }

public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }

public Optional<Cluster> cluster(ClusterSpec.Id id) {
    return Optional.ofNullable(clusters.get(id));
}

/** Returns a copy of this with the given cluster added or replaced. */
public Application with(Cluster cluster) {
    Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
    clusters.put(cluster.id(), cluster);
    return new Application(id, clusters);
}

/**
 * Returns an application with the given cluster having the min and max resource limits of the given cluster.
 * If the cluster has a target which is not inside the new limits, the target is removed.
 */
// NOTE(review): the javadoc above appears to document withClusterLimits (not shown here),
// not the withClusterTarget method that follows — verify and move it to the right method.
/**
 * Returns an application with the given target for the given cluster,
 * if it exists and the target is within the bounds
 */
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
    Cluster cluster = clusters.get(id);
    if (cluster == null) return this;  // unknown cluster: no-op
    return with(cluster.withTarget(target));
}

// Identity is the application id only; cluster state is excluded, matching equals below.
@Override
public int hashCode() {
    return id.hashCode();
}

@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if ( ! (other instanceof Application)) return false;
    return ((Application)other).id().equals(this.id());
}

@Override
public String toString() {
    return "application '" + id + "'";
}
} | class Application {
private final ApplicationId id;

// Cluster state keyed by cluster id. Instances are immutable; mutators return copies.
private final Map<ClusterSpec.Id, Cluster> clusters;

public Application(ApplicationId id) {
    this(id, Map.of());
}

public Application(ApplicationId id, Collection<Cluster> clusters) {
    this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}

private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
    this.id = id;
    this.clusters = clusters;
}

public ApplicationId id() { return id; }

public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }

public Optional<Cluster> cluster(ClusterSpec.Id id) {
    return Optional.ofNullable(clusters.get(id));
}

/** Returns a copy of this with the given cluster added or replaced. */
public Application with(Cluster cluster) {
    Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
    clusters.put(cluster.id(), cluster);
    return new Application(id, clusters);
}

/**
 * Returns an application with the given cluster having the min and max resource limits of the given cluster.
 * If the cluster has a target which is not inside the new limits, the target is removed.
 */
// NOTE(review): the javadoc above appears to document withClusterLimits (not shown here),
// not the withClusterTarget method that follows — verify and move it to the right method.
/**
 * Returns an application with the given target for the given cluster,
 * if it exists and the target is within the bounds
 */
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
    Cluster cluster = clusters.get(id);
    if (cluster == null) return this;  // unknown cluster: no-op
    return with(cluster.withTarget(target));
}

// Identity is the application id only; cluster state is excluded, matching equals below.
@Override
public int hashCode() {
    return id.hashCode();
}

@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if ( ! (other instanceof Application)) return false;
    return ((Application)other).id().equals(this.id());
}

@Override
public String toString() {
    return "application '" + id + "'";
}
} |
It will not throw but wipe the target. I think that is correct. In the case where new limits are set which are outside the current size we'll pick the lowest limit. That's not the right choice if the entire new window is *below* the current size, but this isn't perfectly straightforward since it's multidimensional ... so I plan to do some more work on making that safe, but it's orthogonal to this. | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | cluster = cluster.withLimits(min, max); | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | class Application {
private final ApplicationId id;

// Cluster state keyed by cluster id. Instances are immutable; mutators return copies.
private final Map<ClusterSpec.Id, Cluster> clusters;

public Application(ApplicationId id) {
    this(id, Map.of());
}

public Application(ApplicationId id, Collection<Cluster> clusters) {
    this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}

private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
    this.id = id;
    this.clusters = clusters;
}

public ApplicationId id() { return id; }

public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }

public Optional<Cluster> cluster(ClusterSpec.Id id) {
    return Optional.ofNullable(clusters.get(id));
}

/** Returns a copy of this with the given cluster added or replaced. */
public Application with(Cluster cluster) {
    Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
    clusters.put(cluster.id(), cluster);
    return new Application(id, clusters);
}

/**
 * Returns an application with the given cluster having the min and max resource limits of the given cluster.
 * If the cluster has a target which is not inside the new limits, the target is removed.
 */
// NOTE(review): the javadoc above appears to document withClusterLimits (not shown here),
// not the withClusterTarget method that follows — verify and move it to the right method.
/**
 * Returns an application with the given target for the given cluster,
 * if it exists and the target is within the bounds
 */
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
    Cluster cluster = clusters.get(id);
    if (cluster == null) return this;  // unknown cluster: no-op
    return with(cluster.withTarget(target));
}

// Identity is the application id only; cluster state is excluded, matching equals below.
@Override
public int hashCode() {
    return id.hashCode();
}

@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if ( ! (other instanceof Application)) return false;
    return ((Application)other).id().equals(this.id());
}

@Override
public String toString() {
    return "application '" + id + "'";
}
} | class Application {
private final ApplicationId id;

// Cluster state keyed by cluster id. Instances are immutable; mutators return copies.
private final Map<ClusterSpec.Id, Cluster> clusters;

public Application(ApplicationId id) {
    this(id, Map.of());
}

public Application(ApplicationId id, Collection<Cluster> clusters) {
    this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}

private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
    this.id = id;
    this.clusters = clusters;
}

public ApplicationId id() { return id; }

public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }

public Optional<Cluster> cluster(ClusterSpec.Id id) {
    return Optional.ofNullable(clusters.get(id));
}

/** Returns a copy of this with the given cluster added or replaced. */
public Application with(Cluster cluster) {
    Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
    clusters.put(cluster.id(), cluster);
    return new Application(id, clusters);
}

/**
 * Returns an application with the given cluster having the min and max resource limits of the given cluster.
 * If the cluster has a target which is not inside the new limits, the target is removed.
 */
// NOTE(review): the javadoc above appears to document withClusterLimits (not shown here),
// not the withClusterTarget method that follows — verify and move it to the right method.
/**
 * Returns an application with the given target for the given cluster,
 * if it exists and the target is within the bounds
 */
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
    Cluster cluster = clusters.get(id);
    if (cluster == null) return this;  // unknown cluster: no-op
    return with(cluster.withTarget(target));
}

// Identity is the application id only; cluster state is excluded, matching equals below.
@Override
public int hashCode() {
    return id.hashCode();
}

@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if ( ! (other instanceof Application)) return false;
    return ((Application)other).id().equals(this.id());
}

@Override
public String toString() {
    return "application '" + id + "'";
}
} |
Where does it wipe it? `Cluster::withLimits` creates `new Cluster` with the new `min`, `max` while keeping old `target`? | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | cluster = cluster.withLimits(min, max); | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | class Application {
private final ApplicationId id;

// Cluster state keyed by cluster id. Instances are immutable; mutators return copies.
private final Map<ClusterSpec.Id, Cluster> clusters;

public Application(ApplicationId id) {
    this(id, Map.of());
}

public Application(ApplicationId id, Collection<Cluster> clusters) {
    this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}

private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
    this.id = id;
    this.clusters = clusters;
}

public ApplicationId id() { return id; }

public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }

public Optional<Cluster> cluster(ClusterSpec.Id id) {
    return Optional.ofNullable(clusters.get(id));
}

/** Returns a copy of this with the given cluster added or replaced. */
public Application with(Cluster cluster) {
    Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
    clusters.put(cluster.id(), cluster);
    return new Application(id, clusters);
}

/**
 * Returns an application with the given cluster having the min and max resource limits of the given cluster.
 * If the cluster has a target which is not inside the new limits, the target is removed.
 */
// NOTE(review): the javadoc above appears to document withClusterLimits (not shown here),
// not the withClusterTarget method that follows — verify and move it to the right method.
/**
 * Returns an application with the given target for the given cluster,
 * if it exists and the target is within the bounds
 */
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
    Cluster cluster = clusters.get(id);
    if (cluster == null) return this;  // unknown cluster: no-op
    return with(cluster.withTarget(target));
}

// Identity is the application id only; cluster state is excluded, matching equals below.
@Override
public int hashCode() {
    return id.hashCode();
}

@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if ( ! (other instanceof Application)) return false;
    return ((Application)other).id().equals(this.id());
}

@Override
public String toString() {
    return "application '" + id + "'";
}
} | class Application {
private final ApplicationId id;
private final Map<ClusterSpec.Id, Cluster> clusters;
public Application(ApplicationId id) {
this(id, Map.of());
}
public Application(ApplicationId id, Collection<Cluster> clusters) {
this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}
private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
this.id = id;
this.clusters = clusters;
}
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
public Optional<Cluster> cluster(ClusterSpec.Id id) {
return Optional.ofNullable(clusters.get(id));
}
public Application with(Cluster cluster) {
Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
clusters.put(cluster.id(), cluster);
return new Application(id, clusters);
}
/**
* Returns an application with the given cluster having the min and max resource limits of the given cluster.
* If the cluster has a target which is not inside the new limits, the target is removed.
*/
/**
* Returns an application with the given target for the given cluster,
* if it exists and the target is within the bounds
*/
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
Cluster cluster = clusters.get(id);
if (cluster == null) return this;
return with(cluster.withTarget(target));
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof Application)) return false;
return ((Application)other).id().equals(this.id());
}
@Override
public String toString() {
return "application '" + id + "'";
}
} |
```suggestion getLogger().log(LogLevel.DEBUG, () -> "Stem to multiple segments '"+segment+"'"); ``` | private Item stem(BlockItem current, StemContext context, Index index) {
Item blockAsItem = (Item)current;
CompositeItem composite;
List<StemList> segments = linguistics.getStemmer().stem(current.stringValue(), index.getStemMode(), context.language);
String indexName = current.getIndexName();
Substring substring = getOffsets(current);
if (segments.size() == 1) {
getLogger().log(LogLevel.DEBUG, "Stem '"+current.stringValue()+"' mode "+index.getStemMode()
+" and language '"+context.language+"' -> '"+segments.get(0)+"'");
TaggableItem w = singleWordSegment(current, segments.get(0), index, substring, context.insidePhrase);
setMetaData(current, context.reverseConnectivity, w);
return (Item) w;
} else if (getLogger().isLoggable(LogLevel.DEBUG)) {
var buf = new StringBuilder();
buf.append("Stem '").append(current.stringValue());
buf.append("' mode ").append(index.getStemMode());
buf.append(" and language '").append(context.language).append("' ->");
for (StemList segment : segments) {
buf.append(" '").append(segment).append("'");
}
getLogger().log(LogLevel.DEBUG, buf.toString());
}
if (context.isCJK)
composite = chooseCompositeForCJK(current, ((Item) current).getParent(), indexName);
else
composite = chooseComposite(current, ((Item) current).getParent(), indexName);
for (StemList segment : segments) {
getLogger().log(LogLevel.DEBUG, "Stem to multiple segments '"+segment+"'");
TaggableItem w = singleWordSegment(current, segment, index, substring, context.insidePhrase);
if (composite instanceof AndSegmentItem) {
setSignificance(w, current);
}
composite.addItem((Item) w);
}
if (composite instanceof AndSegmentItem) {
andSegmentConnectivity(current, context.reverseConnectivity, composite);
}
copyAttributes(blockAsItem, composite);
composite.lock();
if (composite instanceof PhraseSegmentItem) {
PhraseSegmentItem replacement = (PhraseSegmentItem) composite;
setSignificance(replacement, current);
phraseSegmentConnectivity(current, context.reverseConnectivity, replacement);
}
return composite;
} | getLogger().log(LogLevel.DEBUG, "Stem to multiple segments '"+segment+"'"); | private Item stem(BlockItem current, StemContext context, Index index) {
Item blockAsItem = (Item)current;
CompositeItem composite;
List<StemList> segments = linguistics.getStemmer().stem(current.stringValue(), index.getStemMode(), context.language);
String indexName = current.getIndexName();
Substring substring = getOffsets(current);
if (segments.size() == 1) {
getLogger().log(LogLevel.DEBUG, () -> "Stem '"+current.stringValue()+"' mode "+index.getStemMode()
+" and language '"+context.language+"' -> '"+segments.get(0)+"'");
TaggableItem w = singleWordSegment(current, segments.get(0), index, substring, context.insidePhrase);
setMetaData(current, context.reverseConnectivity, w);
return (Item) w;
} else if (getLogger().isLoggable(LogLevel.DEBUG)) {
var buf = new StringBuilder();
buf.append("Stem '").append(current.stringValue());
buf.append("' mode ").append(index.getStemMode());
buf.append(" and language '").append(context.language).append("' ->");
for (StemList segment : segments) {
buf.append(" '").append(segment).append("'");
}
getLogger().log(LogLevel.DEBUG, buf.toString());
}
if (context.isCJK)
composite = chooseCompositeForCJK(current, ((Item) current).getParent(), indexName);
else
composite = chooseComposite(current, ((Item) current).getParent(), indexName);
for (StemList segment : segments) {
getLogger().log(LogLevel.DEBUG, () -> "Stem to multiple segments '"+segment+"'");
TaggableItem w = singleWordSegment(current, segment, index, substring, context.insidePhrase);
if (composite instanceof AndSegmentItem) {
setSignificance(w, current);
}
composite.addItem((Item) w);
}
if (composite instanceof AndSegmentItem) {
andSegmentConnectivity(current, context.reverseConnectivity, composite);
}
copyAttributes(blockAsItem, composite);
composite.lock();
if (composite instanceof PhraseSegmentItem) {
PhraseSegmentItem replacement = (PhraseSegmentItem) composite;
setSignificance(replacement, current);
phraseSegmentConnectivity(current, context.reverseConnectivity, replacement);
}
return composite;
} | class StemContext {
public boolean isCJK = false;
public boolean insidePhrase = false;
public Language language = null;
public IndexFacts.Session indexFacts = null;
public Map<Item, TaggableItem> reverseConnectivity = null;
} | class StemContext {
public boolean isCJK = false;
public boolean insidePhrase = false;
public Language language = null;
public IndexFacts.Session indexFacts = null;
public Map<Item, TaggableItem> reverseConnectivity = null;
} |
In the Cluster constructor. | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | cluster = cluster.withLimits(min, max); | public Application withClusterLimits(ClusterSpec.Id id, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
cluster = new Cluster(id, min, max, Optional.empty(), Optional.empty());
else
cluster = cluster.withLimits(min, max);
return with(cluster);
} | class Application {
private final ApplicationId id;
private final Map<ClusterSpec.Id, Cluster> clusters;
public Application(ApplicationId id) {
this(id, Map.of());
}
public Application(ApplicationId id, Collection<Cluster> clusters) {
this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}
private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
this.id = id;
this.clusters = clusters;
}
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
public Optional<Cluster> cluster(ClusterSpec.Id id) {
return Optional.ofNullable(clusters.get(id));
}
public Application with(Cluster cluster) {
Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
clusters.put(cluster.id(), cluster);
return new Application(id, clusters);
}
/**
* Returns an application with the given cluster having the min and max resource limits of the given cluster.
* If the cluster has a target which is not inside the new limits, the target is removed.
*/
/**
* Returns an application with the given target for the given cluster,
* if it exists and the target is within the bounds
*/
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
Cluster cluster = clusters.get(id);
if (cluster == null) return this;
return with(cluster.withTarget(target));
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof Application)) return false;
return ((Application)other).id().equals(this.id());
}
@Override
public String toString() {
return "application '" + id + "'";
}
} | class Application {
private final ApplicationId id;
private final Map<ClusterSpec.Id, Cluster> clusters;
public Application(ApplicationId id) {
this(id, Map.of());
}
public Application(ApplicationId id, Collection<Cluster> clusters) {
this(id, clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)));
}
private Application(ApplicationId id, Map<ClusterSpec.Id, Cluster> clusters) {
this.id = id;
this.clusters = clusters;
}
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
public Optional<Cluster> cluster(ClusterSpec.Id id) {
return Optional.ofNullable(clusters.get(id));
}
public Application with(Cluster cluster) {
Map<ClusterSpec.Id, Cluster> clusters = new HashMap<>(this.clusters);
clusters.put(cluster.id(), cluster);
return new Application(id, clusters);
}
/**
* Returns an application with the given cluster having the min and max resource limits of the given cluster.
* If the cluster has a target which is not inside the new limits, the target is removed.
*/
/**
* Returns an application with the given target for the given cluster,
* if it exists and the target is within the bounds
*/
public Application withClusterTarget(ClusterSpec.Id id, ClusterResources target) {
Cluster cluster = clusters.get(id);
if (cluster == null) return this;
return with(cluster.withTarget(target));
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof Application)) return false;
return ((Application)other).id().equals(this.id());
}
@Override
public String toString() {
return "application '" + id + "'";
}
} |
wanted -> current | private String currentPlatform(Node node) {
return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
} | return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); | private String currentPlatform(Node node) {
String currentRepo = node.currentDockerImage().repository();
String wantedRepo = node.wantedDockerImage().repository();
return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
private final Timeouts timeouts;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
this.timeouts = Timeouts.of(controller.system());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), timeouts.endpoint())) {
logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown()));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
}
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
zoneEndpoints.forEach((zone, endpoints) -> {
messages.add("- " + zone);
for (Endpoint endpoint : endpoints)
messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
});
logger.log(messages);
}
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade()
? " <-- " + currentPlatform(node.node())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
private String wantedPlatform(Node node) {
return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
}
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/** Polls the tester for test suite status, mapping it to a run status once the suite is done. */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// If the tester's certificate (when one was generated) expired mid-run, results can no
// longer be fetched from it, so the run is abandoned.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
// Not done yet -- this step will be retried.
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies the Vespa logs from the deployment, if it still exists; failures end the run with error. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    // Nothing to copy if the deployment is already gone.
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment; retries for up to an hour before giving up with error. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
// Keep retrying (empty result) until one hour has passed since the step started.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/** Deactivates the tester deployment; retries for up to an hour before giving up with error. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
// Keep retrying (empty result) until one hour has passed since the step started.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/** Reports the run's outcome, sending failure notifications if the run failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
// The run may have been removed concurrently; treat this as an error outcome.
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// A run for a newly pushed commit notifies on failingCommit; otherwise on failing.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// Optionally include the commit author, when the spec asks for author notification.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients));
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
// Mail is best-effort; a failure to send must not fail the report step.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the failure mail to send for the given run's status, or empty if none applies. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
switch (run.status()) {
// Non-failure outcomes produce no mail.
case running:
case aborted:
case success:
return Optional.empty();
case outOfCapacity:
// Only production capacity shortages warrant a mail.
return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
case deploymentFailed:
return Optional.of(mails.deploymentFailure(run.id(), recipients));
case installationFailed:
return Optional.of(mails.installationFailure(run.id(), recipients));
case testFailure:
return Optional.of(mails.testFailure(run.id(), recipients));
case error:
case endpointCertificateTimeout:
return Optional.of(mails.systemError(run.id(), recipients));
default:
// Unknown statuses are logged, then treated as system errors.
logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
return Optional.of(mails.systemError(run.id(), recipients));
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Take and immediately release the application lock, so any in-flight update completes
// before we read the instance below.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD, a run started after the deployment was made never times out here.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's time-to-live if it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Only public-system test environments use a generated tester certificate.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Assemble the zip: test package first, then the generated services and deployment files.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/** Generates a self-signed tester key pair and certificate, stores the certificate, and adds both to the package. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject identifies the exact tester instance, job type and run number.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(timeouts.testerCertificate()),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so endTests can later check the certificate's validity for this run.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** The tester is deployed in the same zone as the job it tests. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/** Returns the node resources for the tester: the spec's testerFlavor if set, else a zone-dependent default. */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
// No flavor in the spec: fall back to defaults, with a larger one for aws regions.
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    // Athenz attributes are emitted only when configured in the deployment spec.
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs one or more lines at INFO to the job log.
private void log(String... messages) {
log(List.of(messages));
}
// Forwards pre-built log entries verbatim to the job log.
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the exception to the server log only, keeping its stack trace out of the job log.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
// Append the full stack trace to the job log message, so users can see it too.
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
/** Step timeouts, some of which depend on whether the system is a CD system. */
static class Timeouts {
private final SystemName system;
private Timeouts(SystemName system) {
this.system = requireNonNull(system);
}
public static Timeouts of(SystemName system) {
return new Timeouts(system);
}
Duration capacity() { return Duration.ofMinutes(system.isCd() ? 5 : 0); } // Grace period for out-of-capacity, CD only.
Duration endpoint() { return Duration.ofMinutes(15); } // Max wait for endpoints to show up after convergence.
Duration endpointCertificate() { return Duration.ofMinutes(20); } // Max wait for a provisioned endpoint certificate.
Duration tester() { return Duration.ofMinutes(30); } // Max wait for the tester container installation.
Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } // Max time nodes may stay suspended.
Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 120); } // Max time without any node suspending to progress.
Duration testerCertificate() { return Duration.ofMinutes(300); } // Validity period of generated tester certificates.
}
}
class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources when the deployment spec names no tester flavor.
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
// Larger default used for aws regions (see testerResourcesFor).
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
private final Timeouts timeouts;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
this.timeouts = Timeouts.of(controller.system());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
// Dispatches the given step; an empty result means "not done yet, retry later".
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO trouble is treated as transient: log without the stack trace in the job log, and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps are retried indefinitely on unexpected failures.
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the versions a staging run starts out from: source versions, falling back to targets. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
/** Deploys the target platform and application versions under test. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
/** Common deployment for the real application; setTheStage selects the initial (staging setup) deployment. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
/** Deploys the generated tester application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
/**
 * Executes the given deployment, forwarding its prepare log to the job log, and maps
 * config server errors to run statuses or retries.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Forward the config server's prepare log entries to the job log.
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
// Disallowed refeed actions fail the deployment with instructions to the user.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the line below appears truncated at "http:" (unterminated string
// literal) -- presumably a documentation URL; restore from upstream sources.
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Schedule restarts on each distinct host that the config change requires.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors retry for up to an hour, then end the run as deploymentFailed.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
// CD systems wait a short grace period for capacity before failing.
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
// Retry until the certificate timeout, then end with endpointCertificateTimeout.
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
/** Awaits installation of the initial (staging setup) real deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Awaits installation of the real deployment under test. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Polls convergence of the real deployment, returning empty while still in progress,
 * running on success, and installationFailed/error on timeouts or stuck installations.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// During staging setup, converge on the source platform (if any); otherwise the target.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
// On the first tick (no summary stored yet), log details for every node.
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
// NOTE(review): the log line below appears truncated (unterminated string literal),
// presumably a section header; restore from upstream sources.
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), timeouts.endpoint())) {
logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Determine whether the installation is stuck, collecting a human-readable reason.
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown()));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
// NOTE(review): the two log lines below appear truncated (unterminated string
// literals), presumably section headers; restore from upstream sources.
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
// On subsequent ticks, log only nodes expected down or needing new config.
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
// Track the last instant at which no nodes were suspended, for the stuck-detection above.
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
/** Polls convergence of the tester deployment, with its own, shorter retry limits. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Give up with error if config status has been unavailable for five minutes.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
// No endpoints for this zone yet.
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/** Returns true iff all endpoints for the deployment resolve in DNS and point at its load balancers. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
// For exclusively routed endpoints, verify the CNAME and its resolution against the
// routing policy's load balancer.
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
/** Logs the discovered endpoints, grouped per zone, to the job log. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var entry : zoneEndpoints.entrySet()) {
        lines.add("- " + entry.getKey());
        entry.getValue().forEach(endpoint -> lines.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"));
    }
    logger.log(lines);
}
/**
 * Renders one summary line for the node itself, followed by one line per service --
 * all services when printAllServices is set, otherwise only when the node needs new config.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade()
? " <-- " + currentPlatform(node.node())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** "<docker repository>:<wanted version>" label for the platform the node should be running. */
private String wantedPlatform(Node node) {
return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
}
/** Renders an orchestration state as a human-friendly phrase for log output. */
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
/** Starts the test suite in the tester container; isSetup selects the staging-setup suite. */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// The tester is handed endpoints for all production deployments, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
// The tester container must still be answering before we hand it the test config.
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress, mapping its status to a run status once tests
 * have completed, or returning empty while they are still running (so this step is retried).
 *
 * Aborts if the deployment under test, or the tester certificate, expired while tests ran.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    // An invalid tester certificate means results can no longer be fetched securely; abort.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    // Pull any new test log output into the run's log before reading the status.
    controller.jobController().updateTestLog(id);
    TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
    switch (testStatus) {
        case NOT_STARTED:
            // startTests must have completed for this step to be active, so this is a broken invariant.
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty(); // Not done yet; this step will be polled again.
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/**
 * Copies the Vespa log from the deployment into the run's log.
 * A missing deployment is not an error for this step; there is simply nothing to copy.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);

    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/**
 * Deactivates the real deployment for this run.
 * Failures are retried until an hour has passed since this step first started; then the run errors out.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        ZoneId zone = id.type().zone(controller.system());
        logger.log("Deactivating deployment of " + id.application() + " in " + zone + " ...");
        controller.applications().deactivate(id.application(), zone);
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant doom = controller.jobController().run(id).get()
                                 .stepInfo(deactivateReal).get()
                                 .startTime().get()
                                 .plus(Duration.ofHours(1));
        return doom.isBefore(controller.clock().instant()) ? Optional.of(error)
                                                           : Optional.empty();
    }
}
/**
 * Deactivates the tester deployment for this run.
 * Failures are retried until an hour has passed since this step first started; then the run errors out.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        ZoneId zone = id.type().zone(controller.system());
        logger.log("Deactivating tester of " + id.application() + " in " + zone + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant doom = controller.jobController().run(id).get()
                                 .stepInfo(deactivateTester).get()
                                 .startTime().get()
                                 .plus(Duration.ofHours(1));
        return doom.isBefore(controller.clock().instant()) ? Optional.of(error)
                                                           : Optional.empty();
    }
}
/** Reports the outcome of the run, sending a failure notification if the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(run -> sendNotification(run, logger));
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        // The run may have been aborted or superseded while we were reporting.
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/**
 * Sends a mail with a notification of a failed run, if one should be sent.
 *
 * Recipients are taken from the deployment spec's notification settings for the relevant
 * trigger: {@code failingCommit} when the change being rolled out is the application version
 * this run tested, {@code failing} otherwise. The commit author is added when the spec asks
 * for author notification and an author email is known.
 */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // Whether the change currently rolling out is exactly the application version this run tested.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return; // No one to notify.
    try {
        logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients));
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {
        // Mail delivery is best-effort; never fail the report step over it.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the failure mail to send for the given run, or empty if its status warrants none. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    switch (run.status()) {
        case running:
        case aborted:
        case success:
            return Optional.empty(); // Not failures; nothing to send.
        case outOfCapacity:
            // Lack of capacity is only mail-worthy for production jobs.
            return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(run.id(), recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(run.id(), recipients));
        case testFailure:
            return Optional.of(mails.testFailure(run.id(), recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(run.id(), recipients));
        default:
            // Unknown status — log it, and fall back to a system-error mail rather than staying silent.
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(run.id(), recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Take and release the application lock with a no-op action before reading —
    // presumably to let any in-flight modification complete first, so the read is fresh;
    // NOTE(review): confirm this intent with the owners before changing it.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment exceeds the effective timeout.
 *
 * The job is timed out before the deployment expires, for zones where deployments are not
 * persistent, so the Vespa log can still be collected. The effective timeout is therefore
 * the lower of the zone's deployment time-to-live and the given default, minus one minute.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a deployment made after this run started belongs to a newer run; never time it out here.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;

    Duration timeout = defaultTimeout;
    Optional<Duration> zoneTimeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone());
    if (zoneTimeout.isPresent() && zoneTimeout.get().compareTo(defaultTimeout) < 0)
        timeout = zoneTimeout.get();

    Instant doom = deployment.at().plus(timeout.minus(Duration.ofMinutes(1)));
    return doom.isBefore(controller.clock().instant());
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are only used in public systems, and only for test environments.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
    // The pre-built test fat-jar package, stored when the application version was submitted.
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Zip up the test package first, then overlay the generated services.xml and deployment.xml.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a self-signed certificate for the tester, records it with the run,
 * and adds the private key and certificate PEMs to the tester package.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    String commonName = id.tester().id().toFullString() + "." + id.type() + "." + id.number();
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     new X500Principal("CN=" + commonName),
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(timeouts.testerCertificate()),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Store the certificate with the run, so endTests can check its validity later.
    controller.jobController().storeTesterCertificate(id, certificate);
    byte[] keyPem = KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8);
    byte[] certificatePem = X509CertificateUtils.toPem(certificate).getBytes(UTF_8);
    zipBuilder.add("artifacts/key", keyPem);
    zipBuilder.add("artifacts/cert", certificatePem);
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources to use for the tester: the tester flavor declared by the first
 * deployment step concerning this zone's environment, or a region-dependent default.
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    NodeResources fallback = zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                    : DEFAULT_TESTER_RESOURCES;
    for (var step : spec.steps())
        if (step.concerns(zone.environment()))
            return step.zones().get(0).testerFlavor()
                       .map(NodeResources::fromLegacyName)
                       .orElse(fallback);
    return fallback;
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    // Athenz attributes are emitted only when configured in the deployment spec.
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
    private final RunId id;   // Run whose job log the entries are written to.
    private final Step step;  // Step the entries are attributed to.
    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }
    /** Logs the given messages, at INFO level, to the job log only. */
    private void log(String... messages) {
        log(List.of(messages));
    }
    /** Logs pre-built log entries verbatim to the job log. */
    private void logAll(List<LogEntry> messages) {
        controller.jobController().log(id, step, messages);
    }
    /** Logs the given messages, at INFO level, to the job log only. */
    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }
    /** Logs the given message, at the given level, to both the class logger and the job log. */
    private void log(Level level, String message) {
        log(level, message, null);
    }
    /**
     * Logs the message and exception to the class logger, but only the message to the job log —
     * for exceptions whose stack traces should not appear in the user-visible job log.
     */
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }
    /** Logs to the class logger, and to the job log with the exception's stack trace appended to the message. */
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            // Render the stack trace into the job log message, since the job log stores plain text.
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }
}
/** Timeouts for the various steps of a run; some differ between CD and other systems. */
static class Timeouts {
    private final SystemName system;
    private Timeouts(SystemName system) {
        this.system = requireNonNull(system);
    }
    public static Timeouts of(SystemName system) {
        return new Timeouts(system);
    }
    // Grace period to wait for capacity on OUT_OF_CAPACITY; zero outside CD, i.e. fail immediately.
    Duration capacity() { return Duration.ofMinutes(system.isCd() ? 5 : 0); }
    // How long to wait for deployment endpoints to show up after convergence.
    Duration endpoint() { return Duration.ofMinutes(15); }
    // How long to wait for a provisioned endpoint certificate before timing out the run.
    Duration endpointCertificate() { return Duration.ofMinutes(20); }
    // How long the tester container may take to install.
    Duration tester() { return Duration.ofMinutes(30); }
    // How long nodes may stay suspended during installation before the run fails.
    Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); }
    // How long installation may make no progress (no nodes suspending) before the run fails.
    Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 120); }
    // Validity period of generated tester certificates.
    Duration testerCertificate() { return Duration.ofMinutes(300); }
}
} |
Fixed, and did your suggestion about only showing current if different from wanted | private String currentPlatform(Node node) {
return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
} | return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); | private String currentPlatform(Node node) {
String currentRepo = node.currentDockerImage().repository();
String wantedRepo = node.wantedDockerImage().repository();
return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
private final Timeouts timeouts;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
this.timeouts = Timeouts.of(controller.system());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), timeouts.endpoint())) {
logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown()));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
}
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
zoneEndpoints.forEach((zone, endpoints) -> {
messages.add("- " + zone);
for (Endpoint endpoint : endpoints)
messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
});
logger.log(messages);
}
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade()
? " <-- " + currentPlatform(node.node())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders the node's wanted platform as "docker-repository:wanted-version". */
private String wantedPlatform(Node node) {
    String repository = node.wantedDockerImage().repository();
    return repository + ":" + node.wantedVersion();
}
/** Maps a service state to the phrase used for it in log output. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the tests in the tester container: gathers endpoints for all production deployments
 * plus the zone under test, serializes the test config, and tells the tester cloud to begin.
 * Returns error if the deployment, its endpoints, or the tester container has gone away;
 * running when tests were successfully started.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests may reach all production deployments, in addition to the zone this job deploys to.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
// isSetup selects the staging setup suite; otherwise the suite follows from the job type.
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress and maps its status to a run status:
 * empty means still running (retry); aborted if the deployment or tester certificate
 * expired first; otherwise the terminal status reported by the tester.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// A tester certificate is only present for runs that generated one; abort if it has expired.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
// Pull the latest test log before reading the status, so the log is current on completion.
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies the Vespa log from the deployment into the run's log, if the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            // Failing to fetch logs is an error for the run, not a retryable condition.
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real deployment of this run; failures are tolerated for up to an hour before giving up. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    ZoneId zone = id.type().zone(controller.system());
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + zone + " ...");
        controller.applications().deactivate(id.application(), zone);
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        Instant deadline = controller.clock().instant().minus(Duration.ofHours(1));
        // Retry (empty) until the step has been failing for an hour, then conclude with error.
        return stepStart.isBefore(deadline) ? Optional.of(error) : Optional.empty();
    }
}
/** Deactivates the tester of this run; failures are tolerated for up to an hour before giving up. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    ZoneId zone = id.type().zone(controller.system());
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + zone + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        Instant deadline = controller.clock().instant().minus(Duration.ofHours(1));
        // Retry (empty) until the step has been failing for an hour, then conclude with error.
        return stepStart.isBefore(deadline) ? Optional.of(error) : Optional.empty();
    }
}
/** Sends a failure notification for the run, if it is still active and has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(run -> sendNotification(run, logger));
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    ApplicationId instanceId = run.id().application();
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(instanceId));
    Notifications notifications = application.deploymentSpec().requireInstance(instanceId.instance()).notifications();
    // A run counts as failing on a new commit when the instance's pending change matches the run's target.
    boolean newCommit = application.require(instanceId.instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;

    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;

    try {
        logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients));
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {
        // Mail delivery is best-effort; never fail the run over it.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the failure mail to send for the run's status, or empty if none applies. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    RunStatus status = run.status();
    // Non-failure statuses produce no mail.
    if (status == running || status == aborted || status == success)
        return Optional.empty();
    if (status == outOfCapacity)
        return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients))
                                              : Optional.empty();
    if (status == deploymentFailed)
        return Optional.of(mails.deploymentFailure(run.id(), recipients));
    if (status == installationFailed)
        return Optional.of(mails.installationFailure(run.id(), recipients));
    if (status == testFailure)
        return Optional.of(mails.testFailure(run.id(), recipients));
    if (status == error || status == endpointCertificateTimeout)
        return Optional.of(mails.systemError(run.id(), recipients));
    // Unknown status: warn, and fall back to the generic system-error mail.
    logger.log(WARNING, "Don't know what mail to send for run status '" + status + "'");
    return Optional.of(mails.systemError(run.id(), recipients));
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// NOTE(review): the lock is acquired and immediately released — presumably to serialise with
// any in-flight modification before reading; confirm this is intentional and not a leftover.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD systems, a deployment made before this run started is never timed out here.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment TTL when it is shorter than the given default.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// A generated tester certificate is only used for test environments in public systems.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Zip the stored tester artifact together with the generated services.xml and deployment.xml.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates an RSA key pair and a self-signed certificate for the tester, stores the
 * certificate with the run, and adds the PEM-encoded key and certificate to the package.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
// Validity window starts now and lasts for the configured tester-certificate timeout.
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(timeouts.testerCertificate()),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so later steps (endTests) can check the certificate's validity period.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the tester resources declared for the first deployment step concerning the zone's
 * environment, or the default resources (larger in AWS regions) when none are declared.
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    NodeResources fallback = zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                    : DEFAULT_TESTER_RESOURCES;
    return spec.steps().stream()
               .filter(step -> step.concerns(zone.environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor())
               .map(NodeResources::fromLegacyName)
               .orElse(fallback);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
// Reserve a fixed amount of memory for the jdisc container; the surefire test JVM gets
// half of what remains.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
// NOTE(review): the binding URL on the next line appears truncated in this source
// (everything from "//" onwards is missing) — restore from upstream before building.
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    String domainAttribute = athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("");
    String serviceAttribute = athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("");
    String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" +
                            "<deployment version=\"1.0\" " + domainAttribute + serviceAttribute + ">" +
                            " <instance id=\"" + id.id().instance().value() + "\" />" +
                            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
// The run and step every entry is attributed to in the job log.
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages to the job log, at INFO.
private void log(String... messages) {
log(List.of(messages));
}
// Logs pre-built entries verbatim, keeping their original timestamps and levels.
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the throwable only to the internal logger, keeping its details out of the job log.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both logs; the stack trace, if any, is appended to the job-log message.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
/** Timeouts for the various steps, some of which depend on whether this is a CD system. */
static class Timeouts {
private final SystemName system;
private Timeouts(SystemName system) {
this.system = requireNonNull(system);
}
public static Timeouts of(SystemName system) {
return new Timeouts(system);
}
// How long to wait for capacity on deployment; only CD systems wait at all.
Duration capacity() { return Duration.ofMinutes(system.isCd() ? 5 : 0); }
// How long to wait for endpoints to show up after convergence.
Duration endpoint() { return Duration.ofMinutes(15); }
// How long to wait for a provisioned endpoint certificate.
Duration endpointCertificate() { return Duration.ofMinutes(20); }
// How long to wait for the tester container installation to complete.
Duration tester() { return Duration.ofMinutes(30); }
// How long nodes may stay suspended before installation is failed.
Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); }
// How long installation may proceed with no nodes suspending before it is failed.
Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 120); }
// Validity period of generated tester certificates.
Duration testerCertificate() { return Duration.ofMinutes(300); }
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester container resources; AWS zones get the larger variant (see testerResourcesFor).
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
private final Timeouts timeouts;
/** Creates a step runner operating on the given controller; collaborators are derived from its system. */
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
this.timeouts = Timeouts.of(controller.system());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
// Dispatches the step to its handler. An empty result means "not done yet — retry";
// a present result is the step's conclusion.
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO trouble is assumed transient: log internally and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps are retried despite unexpected failures; other steps conclude with error.
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the run's source platform and application versions (falling back to targets), staging the change. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform +
               " and application version " + application.id() + " ...");
    return deployReal(id, true, logger);
}
/** Deploys the run's target platform and application versions. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.targetPlatform();
    ApplicationVersion application = versions.targetApplication();
    logger.log("Deploying platform version " + platform +
               " and application version " + application.id() + " ...");
    return deployReal(id, false, logger);
}
/** Deploys the real application through the common deploy routine, timing out from this step's start. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
    // The relevant step record depends on whether this is the initial (staging-setup) deployment.
    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                  .startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy2(id.job(), setTheStage),
                  stepStart,
                  logger);
}
/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    logger.log("Deploying the tester container on platform " + platform + " ...");
    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(deployTester).get()
                                  .startTime().get();
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(), testerPackage(id), zone, platform),
                  stepStart,
                  logger);
}
/**
 * Runs the given deployment, forwards its prepare log, schedules any required service restarts,
 * and maps config server and endpoint-certificate errors to retry (empty) or a terminal status.
 * NOTE(review): two string literals below (the validation-override URL and possibly others)
 * appear truncated in this source — restore from upstream before building.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Forward the config server's prepare log verbatim into the job log.
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
// Disallowed re-feed actions cannot be automated; fail the deployment with guidance.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add("    http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Schedule a restart on each distinct host with services needing it.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors retry (empty) until the step has run for an hour, then fail.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
// CD systems wait a short while for capacity; everyone else fails immediately.
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
// Retry until the endpoint-certificate timeout, then fail with a dedicated status.
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + timeouts.endpointCertificate());
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
/** Waits for the initial (staging-setup) real deployment to install; delegates to installReal. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Waits for the real deployment of the target versions to install; delegates to installReal. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Tracks installation of the real application: checks service convergence, endpoint and
 * container availability, and the various failure timeouts. Empty means "retry later";
 * a present status concludes the step.
 * NOTE(review): several logger.log(" lines below are truncated in this source
 * (string content after "//" is missing) — restore from upstream before building.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
// The platform to converge on is the source platform when staging the change.
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
// Collect the application's nodes and their parent hosts, to assess upgrade progress.
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
// On the first tick (no summary stored yet), log the full node status.
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), timeouts.endpoint())) {
logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Failure conditions, checked in increasing order of precedence.
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown()));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
// No node has been allowed down for too long: either suspension is blocked, or services won't start.
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
// After the first tick, only log nodes that are down or still need new config.
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
// Track when no nodes were allowed down, for the noNodesDown timeout, and store the summary.
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
/**
 * Tracks installation of the tester container: empty means "retry later"; running when the
 * tester has converged and its container responds; error on the various timeouts.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
// No convergence info yet; give up with error if this has lasted more than five minutes.
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;
    // Every endpoint must report ready; bail out on the first one that does not.
    for (var endpoint : endpoints.get(zoneId)) {
        if ( ! controller.jobController().cloud().ready(endpoint.url())) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/**
 * Returns whether all endpoints of the deployment resolve in DNS, and — for exclusively
 * routed endpoints — whether their CNAMEs and IP addresses match the assigned load balancer.
 */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
// An exclusively routed endpoint must have a routing policy for its cluster.
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
// Its CNAME must point at the policy's load balancer name ...
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
// ... and the endpoint's IP must match the load balancer's resolved address.
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
/** Logs every discovered endpoint, grouped under its zone. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> output = new ArrayList<>();
    output.add("Found endpoints:");
    zoneEndpoints.forEach((zone, endpointList) -> {
        output.add("- " + zone);
        endpointList.forEach(endpoint ->
                output.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"));
    });
    logger.log(output);
}
/**
 * Produces human-readable status lines for the given node: a one-line summary
 * (hostname, orchestration state, suspension time), a platform line with any pending
 * upgrade/restart/reboot details, and one line per service — all services when
 * printAllServices is set, otherwise only when the node still needs new config.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade()
? " <-- " + currentPlatform(node.node())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders the wanted platform of a node as "docker-repository:wanted-version". */
private String wantedPlatform(Node node) {
    StringBuilder platform = new StringBuilder();
    platform.append(node.wantedDockerImage().repository())
            .append(':')
            .append(node.wantedVersion());
    return platform.toString();
}
/** Converts an orchestration service state to a human-friendly phrase; unknown states map to their enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Signals the tester to start running its tests against the deployment under test.
 * Returns error if the deployment or the tester container has gone away, running otherwise.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// The test config includes all production deployments of the instance, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress, copying its log output into the run.
 * Returns empty while tests are still running, and a terminal run status otherwise.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// A tester certificate which expires mid-run can no longer be used to reach the tester, so abort.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
// Not done yet; empty means this step is polled again later.
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/**
 * Copies the Vespa logs of the tested deployment into the run, if the deployment still exists.
 * A failure to fetch logs ends the run with an error status.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/**
 * Deactivates the tested deployment. Failures are retried (by returning empty) until an
 * hour has passed since this step first started, after which the run is marked as errored.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
// Empty means "retry later"; give up with an error one hour after the step first started.
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/**
 * Deactivates the tester application. Failures are retried (by returning empty) until an
 * hour has passed since this step first started, after which the run is marked as errored.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
// Empty means "retry later"; give up with an error one hour after the step first started.
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/**
 * Reports the outcome of the run: sends a failure notification if the run failed.
 * Returns error if the job is no longer supposed to run, running otherwise.
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(run -> run.hasFailed())
                  .ifPresent(run -> sendNotification(run, logger));
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// This run concerns a new commit when the change being rolled out equals the application version it targets.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// The commit author is added when the deployment spec asks for author notification.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients));
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
// Mail delivery is best effort; a failure here must not fail the run.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the mail to send for the given run's status, or empty for statuses which warrant no mail. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
switch (run.status()) {
case running:
case aborted:
case success:
return Optional.empty();
case outOfCapacity:
// Running out of capacity is only mail-worthy for production jobs.
return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
case deploymentFailed:
return Optional.of(mails.deploymentFailure(run.id(), recipients));
case installationFailed:
return Optional.of(mails.installationFailure(run.id(), recipients));
case testFailure:
return Optional.of(mails.testFailure(run.id(), recipients));
case error:
case endpointCertificateTimeout:
return Optional.of(mails.systemError(run.id(), recipients));
default:
// Unknown statuses are treated as system errors, but flagged in the internal log.
logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
return Optional.of(mails.systemError(run.id(), recipients));
}
}
/** Returns the deployment of the real application in the zone of the given job, if one exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    Deployment deployment = application(id).deployments().get(type.zone(controller.system()));
    return Optional.ofNullable(deployment);
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Takes and immediately releases the application lock with a no-op action — presumably to
// serialise with any in-flight modification before reading; NOTE(review): confirm this is intended.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// NOTE(review): outside CD systems, a run which started after the deployment was made never
// times out here — presumably such deployments are persistent; confirm intent.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment time-to-live when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only used in public systems, and only for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Sized with some slack (+1000) for the two generated XML entries and zip overhead.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a key pair and a self-signed certificate for the tester, stores the certificate
 * on the run, and adds the PEM-encoded key and certificate to the tester package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// The subject encodes tester id, job type and run number, making each run's certificate distinct.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(timeouts.testerCertificate()),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run, in the run's zone. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources to use for the tester in the given zone: the tester flavor of the
 * first deployment step concerning the zone's environment, when specified, and otherwise a
 * default which depends on whether the region is an AWS region.
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n");
    xml.append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">");
    xml.append(" <instance id=\"").append(id.id().instance().value()).append("\" />");
    xml.append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages at INFO to the job log only.
private void log(String... messages) {
log(List.of(messages));
}
// Logs pre-built entries to the job log, keeping their own levels and timestamps.
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
// Logs the given messages at INFO to the job log only.
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the exception to the internal logger only; the job log gets just the message.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both loggers; the job log message additionally gets the stack trace appended.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
/** Timeouts for the various steps of a run; several of these depend on whether the system is a CD system. */
static class Timeouts {
private final SystemName system;
private Timeouts(SystemName system) {
this.system = requireNonNull(system);
}
public static Timeouts of(SystemName system) {
return new Timeouts(system);
}
Duration capacity() { return Duration.ofMinutes(system.isCd() ? 5 : 0); }
Duration endpoint() { return Duration.ofMinutes(15); }
Duration endpointCertificate() { return Duration.ofMinutes(20); }
Duration tester() { return Duration.ofMinutes(30); }
Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); }
Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 120); }
// Validity period of generated tester certificates (see appendAndStoreCertificate).
Duration testerCertificate() { return Duration.ofMinutes(300); }
}
} |
```suggestion getLogger().log(LogLevel.DEBUG, () -> "Stem '"+current.stringValue()+"' mode "+index.getStemMode() ``` | private Item stem(BlockItem current, StemContext context, Index index) {
Item blockAsItem = (Item)current;
CompositeItem composite;
List<StemList> segments = linguistics.getStemmer().stem(current.stringValue(), index.getStemMode(), context.language);
String indexName = current.getIndexName();
Substring substring = getOffsets(current);
if (segments.size() == 1) {
getLogger().log(LogLevel.DEBUG, "Stem '"+current.stringValue()+"' mode "+index.getStemMode()
+" and language '"+context.language+"' -> '"+segments.get(0)+"'");
TaggableItem w = singleWordSegment(current, segments.get(0), index, substring, context.insidePhrase);
setMetaData(current, context.reverseConnectivity, w);
return (Item) w;
} else if (getLogger().isLoggable(LogLevel.DEBUG)) {
var buf = new StringBuilder();
buf.append("Stem '").append(current.stringValue());
buf.append("' mode ").append(index.getStemMode());
buf.append(" and language '").append(context.language).append("' ->");
for (StemList segment : segments) {
buf.append(" '").append(segment).append("'");
}
getLogger().log(LogLevel.DEBUG, buf.toString());
}
if (context.isCJK)
composite = chooseCompositeForCJK(current, ((Item) current).getParent(), indexName);
else
composite = chooseComposite(current, ((Item) current).getParent(), indexName);
for (StemList segment : segments) {
getLogger().log(LogLevel.DEBUG, "Stem to multiple segments '"+segment+"'");
TaggableItem w = singleWordSegment(current, segment, index, substring, context.insidePhrase);
if (composite instanceof AndSegmentItem) {
setSignificance(w, current);
}
composite.addItem((Item) w);
}
if (composite instanceof AndSegmentItem) {
andSegmentConnectivity(current, context.reverseConnectivity, composite);
}
copyAttributes(blockAsItem, composite);
composite.lock();
if (composite instanceof PhraseSegmentItem) {
PhraseSegmentItem replacement = (PhraseSegmentItem) composite;
setSignificance(replacement, current);
phraseSegmentConnectivity(current, context.reverseConnectivity, replacement);
}
return composite;
} | getLogger().log(LogLevel.DEBUG, "Stem '"+current.stringValue()+"' mode "+index.getStemMode() | private Item stem(BlockItem current, StemContext context, Index index) {
Item blockAsItem = (Item)current;
CompositeItem composite;
List<StemList> segments = linguistics.getStemmer().stem(current.stringValue(), index.getStemMode(), context.language);
String indexName = current.getIndexName();
Substring substring = getOffsets(current);
if (segments.size() == 1) {
getLogger().log(LogLevel.DEBUG, () -> "Stem '"+current.stringValue()+"' mode "+index.getStemMode()
+" and language '"+context.language+"' -> '"+segments.get(0)+"'");
TaggableItem w = singleWordSegment(current, segments.get(0), index, substring, context.insidePhrase);
setMetaData(current, context.reverseConnectivity, w);
return (Item) w;
} else if (getLogger().isLoggable(LogLevel.DEBUG)) {
var buf = new StringBuilder();
buf.append("Stem '").append(current.stringValue());
buf.append("' mode ").append(index.getStemMode());
buf.append(" and language '").append(context.language).append("' ->");
for (StemList segment : segments) {
buf.append(" '").append(segment).append("'");
}
getLogger().log(LogLevel.DEBUG, buf.toString());
}
if (context.isCJK)
composite = chooseCompositeForCJK(current, ((Item) current).getParent(), indexName);
else
composite = chooseComposite(current, ((Item) current).getParent(), indexName);
for (StemList segment : segments) {
getLogger().log(LogLevel.DEBUG, () -> "Stem to multiple segments '"+segment+"'");
TaggableItem w = singleWordSegment(current, segment, index, substring, context.insidePhrase);
if (composite instanceof AndSegmentItem) {
setSignificance(w, current);
}
composite.addItem((Item) w);
}
if (composite instanceof AndSegmentItem) {
andSegmentConnectivity(current, context.reverseConnectivity, composite);
}
copyAttributes(blockAsItem, composite);
composite.lock();
if (composite instanceof PhraseSegmentItem) {
PhraseSegmentItem replacement = (PhraseSegmentItem) composite;
setSignificance(replacement, current);
phraseSegmentConnectivity(current, context.reverseConnectivity, replacement);
}
return composite;
} | class StemContext {
public boolean isCJK = false;
public boolean insidePhrase = false;
public Language language = null;
public IndexFacts.Session indexFacts = null;
public Map<Item, TaggableItem> reverseConnectivity = null;
} | class StemContext {
public boolean isCJK = false;
public boolean insidePhrase = false;
public Language language = null;
public IndexFacts.Session indexFacts = null;
public Map<Item, TaggableItem> reverseConnectivity = null;
} |
```suggestion log.log(Level.FINEST, () -> "getStemmerForLanguage '"+language+"' mode: "+stemMode); ``` | private Stemmer getStemmerForLanguage(Language language, StemMode stemMode) {
log.log(Level.FINEST, "getStemmerForLanguage '"+language+"' mode: "+stemMode);
if (language == null || Language.ENGLISH.equals(language) || StemMode.NONE.equals(stemMode)) {
return null;
}
SnowballStemmer.ALGORITHM alg;
switch (language) {
case DANISH:
alg = SnowballStemmer.ALGORITHM.DANISH;
break;
case DUTCH:
alg = SnowballStemmer.ALGORITHM.DUTCH;
break;
case FINNISH:
alg = SnowballStemmer.ALGORITHM.FINNISH;
break;
case FRENCH:
alg = SnowballStemmer.ALGORITHM.FRENCH;
break;
case GERMAN:
alg = SnowballStemmer.ALGORITHM.GERMAN;
break;
case HUNGARIAN:
alg = SnowballStemmer.ALGORITHM.HUNGARIAN;
break;
case IRISH:
alg = SnowballStemmer.ALGORITHM.IRISH;
break;
case ITALIAN:
alg = SnowballStemmer.ALGORITHM.ITALIAN;
break;
case NORWEGIAN_BOKMAL:
case NORWEGIAN_NYNORSK:
alg = SnowballStemmer.ALGORITHM.NORWEGIAN;
break;
case PORTUGUESE:
alg = SnowballStemmer.ALGORITHM.PORTUGUESE;
break;
case ROMANIAN:
alg = SnowballStemmer.ALGORITHM.ROMANIAN;
break;
case RUSSIAN:
alg = SnowballStemmer.ALGORITHM.RUSSIAN;
break;
case SPANISH:
alg = SnowballStemmer.ALGORITHM.SPANISH;
break;
case SWEDISH:
alg = SnowballStemmer.ALGORITHM.SWEDISH;
break;
case TURKISH:
alg = SnowballStemmer.ALGORITHM.TURKISH;
break;
case ENGLISH:
alg = SnowballStemmer.ALGORITHM.ENGLISH;
break;
default:
return null;
}
return new SnowballStemmer(alg);
} | log.log(Level.FINEST, "getStemmerForLanguage '"+language+"' mode: "+stemMode); | private Stemmer getStemmerForLanguage(Language language, StemMode stemMode) {
log.log(Level.FINEST, () -> "getStemmerForLanguage '"+language+"' mode: "+stemMode);
if (language == null || Language.ENGLISH.equals(language) || StemMode.NONE.equals(stemMode)) {
return null;
}
SnowballStemmer.ALGORITHM alg;
switch (language) {
case DANISH:
alg = SnowballStemmer.ALGORITHM.DANISH;
break;
case DUTCH:
alg = SnowballStemmer.ALGORITHM.DUTCH;
break;
case FINNISH:
alg = SnowballStemmer.ALGORITHM.FINNISH;
break;
case FRENCH:
alg = SnowballStemmer.ALGORITHM.FRENCH;
break;
case GERMAN:
alg = SnowballStemmer.ALGORITHM.GERMAN;
break;
case HUNGARIAN:
alg = SnowballStemmer.ALGORITHM.HUNGARIAN;
break;
case IRISH:
alg = SnowballStemmer.ALGORITHM.IRISH;
break;
case ITALIAN:
alg = SnowballStemmer.ALGORITHM.ITALIAN;
break;
case NORWEGIAN_BOKMAL:
case NORWEGIAN_NYNORSK:
alg = SnowballStemmer.ALGORITHM.NORWEGIAN;
break;
case PORTUGUESE:
alg = SnowballStemmer.ALGORITHM.PORTUGUESE;
break;
case ROMANIAN:
alg = SnowballStemmer.ALGORITHM.ROMANIAN;
break;
case RUSSIAN:
alg = SnowballStemmer.ALGORITHM.RUSSIAN;
break;
case SPANISH:
alg = SnowballStemmer.ALGORITHM.SPANISH;
break;
case SWEDISH:
alg = SnowballStemmer.ALGORITHM.SWEDISH;
break;
case TURKISH:
alg = SnowballStemmer.ALGORITHM.TURKISH;
break;
case ENGLISH:
alg = SnowballStemmer.ALGORITHM.ENGLISH;
break;
default:
return null;
}
return new SnowballStemmer(alg);
} | class OpenNlpTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private static final Logger log = Logger.getLogger(OpenNlpTokenizer.class.getName());
private final Normalizer normalizer;
private final Transformer transformer;
private final SimpleTokenizer simpleTokenizer;
public OpenNlpTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public OpenNlpTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
simpleTokenizer = new SimpleTokenizer(normalizer, transformer);
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
Stemmer stemmer = getStemmerForLanguage(language, stemMode);
if (stemmer == null) {
return simpleTokenizer.tokenize(input, language, stemMode, removeAccents);
}
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents, stemmer);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents,
Stemmer stemmer) {
log.log(Level.FINEST, "processToken '"+token+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
String oldToken = token;
token = doStemming(token, stemmer);
log.log(Level.FINEST, "stem '"+oldToken+"' to '"+token+"'");
}
log.log(Level.FINEST, "processed token is: "+token);
return token;
}
private String doStemming(String token, Stemmer stemmer) {
return stemmer.stem(token).toString();
}
} | class OpenNlpTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private static final Logger log = Logger.getLogger(OpenNlpTokenizer.class.getName());
private final Normalizer normalizer;
private final Transformer transformer;
private final SimpleTokenizer simpleTokenizer;
public OpenNlpTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public OpenNlpTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
simpleTokenizer = new SimpleTokenizer(normalizer, transformer);
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
Stemmer stemmer = getStemmerForLanguage(language, stemMode);
if (stemmer == null) {
return simpleTokenizer.tokenize(input, language, stemMode, removeAccents);
}
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents, stemmer);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents,
Stemmer stemmer) {
final String original = token;
log.log(Level.FINEST, () -> "processToken '"+original+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
final String oldToken = token;
token = doStemming(token, stemmer);
final String newToken = token;
log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+newToken+"'");
}
final String result = token;
log.log(Level.FINEST, () -> "processed token is: "+result);
return result;
}
private String doStemming(String token, Stemmer stemmer) {
return stemmer.stem(token).toString();
}
} |
```suggestion log.log(Level.FINE, () -> "guessing language "+result.get()+" from input: "+input); ``` | private static Language guessLanguageUsingOptimaize(String input) {
Optional<LdLocale> result = languageDetector.detect(textObjectFactory.forText(input));
if ( ! result.isPresent()) return Language.UNKNOWN;
log.log(Level.FINE, "guessing language "+result.get()+" from input: "+input);
return Language.fromLocale(new Locale(result.get().getLanguage()));
} | log.log(Level.FINE, "guessing language "+result.get()+" from input: "+input); | private static Language guessLanguageUsingOptimaize(String input) {
Optional<LdLocale> result = languageDetector.detect(textObjectFactory.forText(input));
if ( ! result.isPresent()) return Language.UNKNOWN;
log.log(Level.FINE, () -> "guessing language "+result.get()+" from input: "+input);
return Language.fromLocale(new Locale(result.get().getLanguage()));
} | class OptimaizeDetector implements Detector {
static private Object initGuard = new Object();
static private TextObjectFactory textObjectFactory = null;
static private LanguageDetector languageDetector = null;
static private final Logger log = Logger.getLogger(OptimaizeDetector.class.getName());
static private void initOptimaize() {
synchronized (initGuard) {
if ((textObjectFactory != null) && (languageDetector != null)) return;
List<LanguageProfile> languageProfiles;
try {
languageProfiles = new LanguageProfileReader().readAllBuiltIn();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
languageDetector = LanguageDetectorBuilder.create(NgramExtractors.standard())
.withProfiles(languageProfiles)
.build();
textObjectFactory = CommonTextObjectFactories.forDetectingOnLargeText();
}
}
private SimpleDetector simpleDetector = new SimpleDetector();
public OptimaizeDetector() {
initOptimaize();
}
@Override
public Detection detect(byte[] input, int offset, int length, Hint hint) {
return new Detection(guessLanguage(input, offset, length), simpleDetector.guessEncoding(input), false);
}
@Override
public Detection detect(ByteBuffer input, Hint hint) {
byte[] buf = new byte[input.remaining()];
input.get(buf, 0, buf.length);
return detect(buf, 0, buf.length, hint);
}
@Override
public Detection detect(String input, Hint hint) {
return new Detection(guessLanguage(input), Utf8.getCharset().name(), false);
}
private Language guessLanguage(byte[] buf, int offset, int length) {
return guessLanguage(Utf8.toString(buf, offset, length));
}
public Language guessLanguage(String input) {
if (input == null || input.length() == 0) return Language.UNKNOWN;
Language result = simpleDetector.guessLanguage(input);
if (result != Language.UNKNOWN) return result;
return guessLanguageUsingOptimaize(input);
}
} | class OptimaizeDetector implements Detector {
static private Object initGuard = new Object();
static private TextObjectFactory textObjectFactory = null;
static private LanguageDetector languageDetector = null;
static private final Logger log = Logger.getLogger(OptimaizeDetector.class.getName());
static private void initOptimaize() {
synchronized (initGuard) {
if ((textObjectFactory != null) && (languageDetector != null)) return;
List<LanguageProfile> languageProfiles;
try {
languageProfiles = new LanguageProfileReader().readAllBuiltIn();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
languageDetector = LanguageDetectorBuilder.create(NgramExtractors.standard())
.withProfiles(languageProfiles)
.build();
textObjectFactory = CommonTextObjectFactories.forDetectingOnLargeText();
}
}
private SimpleDetector simpleDetector = new SimpleDetector();
public OptimaizeDetector() {
initOptimaize();
}
@Override
public Detection detect(byte[] input, int offset, int length, Hint hint) {
return new Detection(guessLanguage(input, offset, length), simpleDetector.guessEncoding(input), false);
}
@Override
public Detection detect(ByteBuffer input, Hint hint) {
byte[] buf = new byte[input.remaining()];
input.get(buf, 0, buf.length);
return detect(buf, 0, buf.length, hint);
}
@Override
public Detection detect(String input, Hint hint) {
return new Detection(guessLanguage(input), Utf8.getCharset().name(), false);
}
private Language guessLanguage(byte[] buf, int offset, int length) {
return guessLanguage(Utf8.toString(buf, offset, length));
}
public Language guessLanguage(String input) {
if (input == null || input.length() == 0) return Language.UNKNOWN;
Language result = simpleDetector.guessLanguage(input);
if (result != Language.UNKNOWN) return result;
return guessLanguageUsingOptimaize(input);
}
} |
Done | public void testPortOverride() {
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
DistributorCluster cluster =
parse("<cluster id=\"storage\" distributor-base-port=\"14065\">" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>");
cluster.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
assertTrue(config.mbus().dispatch_on_encode());
assertEquals(14066, config.rpcport());
} | assertTrue(config.mbus().dispatch_on_encode()); | public void testPortOverride() {
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
DistributorCluster cluster =
parse("<cluster id=\"storage\" distributor-base-port=\"14065\">" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>");
cluster.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
assertTrue(config.mbus().dispatch_on_encode());
assertEquals(14066, config.rpcport());
} | class DistributorTest {
ContentCluster parseCluster(String xml) {
try {
List<String> searchDefs = ApplicationPackageUtils.generateSchemas("music", "movies", "bunnies");
MockRoot root = ContentClusterUtils.createMockRoot(searchDefs);
return ContentClusterUtils.createCluster(xml, root);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
DistributorCluster parse(String xml) {
return parseCluster(xml).getDistributorNodes();
}
@Test
public void testBasics() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
parse("<content id=\"foofighters\"><documents/>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>\n").
getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertTrue(config.is_distributor());
assertEquals("foofighters", config.cluster_name());
}
@Test
public void testRevertDefaultOffForSearch() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertFalse(conf.enable_revert());
}
@Test
public void testSplitAndJoin() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents/>" +
" <tuning>\n" +
" <bucket-splitting max-documents=\"2K\" max-size=\"25M\" minimum-bits=\"8\" />\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(2048, conf.splitcount());
assertEquals(1024, conf.joincount());
assertEquals(26214400, conf.splitsize());
assertEquals(13107200, conf.joinsize());
assertEquals(8, conf.minsplitcount());
assertFalse(conf.inlinebucketsplitting());
}
@Test
public void testThatGroupsAreCountedInWhenComputingSplitBits() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
ContentCluster cluster = parseCluster("<cluster id=\"storage\">\n" +
" <documents/>" +
" <tuning>" +
" <distribution type=\"legacy\"/>" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>");
cluster.getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(1024, conf.splitcount());
assertEquals(512, conf.joincount());
assertEquals(33544432, conf.splitsize());
assertEquals(16000000, conf.joinsize());
assertEquals(8, conf.minsplitcount());
assertTrue(conf.inlinebucketsplitting());
cluster = parseCluster("<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
" <documents/>" +
" <tuning>" +
" <distribution type=\"legacy\"/>" +
" </tuning>\n" +
" <group>" +
" <distribution partitions=\"1|*\"/>" +
" <group name=\"a\" distribution-key=\"0\">" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
" <group name=\"b\" distribution-key=\"1\">" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>" +
" </group>" +
" </group>" +
"</cluster>");
cluster.getConfig(builder);
conf = new StorDistributormanagerConfig(builder);
assertEquals(1024, conf.splitcount());
assertEquals(512, conf.joincount());
assertEquals(33544432, conf.splitsize());
assertEquals(16000000, conf.joinsize());
assertEquals(1, conf.minsplitcount());
assertTrue(conf.inlinebucketsplitting());
}
@Test
public void testMaxMergesPerNode() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
DistributorCluster dcluster = parse("<content id=\"storage\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>");
((ContentCluster) dcluster.getParent()).getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(16, conf.maximum_nodes_per_merge());
builder = new StorDistributormanagerConfig.Builder();
dcluster = parse("<content id=\"storage\">\n" +
" <documents/>" +
" <tuning>\n" +
" <merges max-nodes-per-merge=\"4\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>");
((ContentCluster) dcluster.getParent()).getConfig(builder);
conf = new StorDistributormanagerConfig(builder);
assertEquals(4, conf.maximum_nodes_per_merge());
}
@Test
public void testGarbageCollectionSetExplicitly() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents garbage-collection=\"true\">\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(3600, conf.garbagecollection().interval());
assertEquals("not ((music))", conf.garbagecollection().selectiontoremove());
}
@Test
public void testGarbageCollectionInterval() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents garbage-collection=\"true\" garbage-collection-interval=\"30\">\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(30, conf.garbagecollection().interval());
}
@Test
public void testGarbageCollectionOffByDefault() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents>\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(0, conf.garbagecollection().interval());
assertEquals("", conf.garbagecollection().selectiontoremove());
}
@Test
public void testComplexGarbageCollectionSelectionForIndexedSearch() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"foo\">\n" +
" <documents garbage-collection=\"true\" selection=\"true\">" +
" <document type=\"music\" selection=\"music.year < now()\"/>\n" +
" <document type=\"movies\" selection=\"movies.year < now() - 1200\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(3600, conf.garbagecollection().interval());
assertEquals(
"not ((true) and ((music and (music.year < now())) or (movies and (movies.year < now() - 1200))))",
conf.garbagecollection().selectiontoremove());
}
@Test
public void testGarbageCollectionDisabledIfForced() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"foo\">\n" +
" <documents selection=\"true\" garbage-collection=\"false\" garbage-collection-interval=\"30\">\n" +
" <document type=\"music\" selection=\"music.year < now()\"/>\n" +
" <document type=\"movies\" selection=\"movies.year < now() - 1200\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(0, conf.garbagecollection().interval());
assertEquals("", conf.garbagecollection().selectiontoremove());
}
@Test
private StorDistributormanagerConfig clusterXmlToConfig(String xml) {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse(xml).getConfig(builder);
return new StorDistributormanagerConfig(builder);
}
private String generateXmlForDocTypes(DocType... docTypes) {
return "<content id='storage'>\n" +
DocType.listToXml(docTypes) +
"\n</content>";
}
@Test
public void bucket_activation_disabled_if_no_documents_in_indexed_mode() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.storeOnly("music")));
assertTrue(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_with_single_indexed_document() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.index("music")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_with_multiple_indexed_documents() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.index("music"),
DocType.index("movies")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_if_at_least_one_document_indexed() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.storeOnly("music"),
DocType.streaming("bunnies"),
DocType.index("movies")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_disabled_for_single_streaming_type() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.streaming("music")));
assertTrue(config.disable_bucket_activation());
}
} | class DistributorTest {
ContentCluster parseCluster(String xml) {
try {
List<String> searchDefs = ApplicationPackageUtils.generateSchemas("music", "movies", "bunnies");
MockRoot root = ContentClusterUtils.createMockRoot(searchDefs);
return ContentClusterUtils.createCluster(xml, root);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
DistributorCluster parse(String xml) {
return parseCluster(xml).getDistributorNodes();
}
@Test
public void testBasics() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
parse("<content id=\"foofighters\"><documents/>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>\n").
getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertTrue(config.is_distributor());
assertEquals("foofighters", config.cluster_name());
}
@Test
public void testRevertDefaultOffForSearch() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertFalse(conf.enable_revert());
}
@Test
public void testSplitAndJoin() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents/>" +
" <tuning>\n" +
" <bucket-splitting max-documents=\"2K\" max-size=\"25M\" minimum-bits=\"8\" />\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(2048, conf.splitcount());
assertEquals(1024, conf.joincount());
assertEquals(26214400, conf.splitsize());
assertEquals(13107200, conf.joinsize());
assertEquals(8, conf.minsplitcount());
assertFalse(conf.inlinebucketsplitting());
}
@Test
public void testThatGroupsAreCountedInWhenComputingSplitBits() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
ContentCluster cluster = parseCluster("<cluster id=\"storage\">\n" +
" <documents/>" +
" <tuning>" +
" <distribution type=\"legacy\"/>" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>");
cluster.getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(1024, conf.splitcount());
assertEquals(512, conf.joincount());
assertEquals(33544432, conf.splitsize());
assertEquals(16000000, conf.joinsize());
assertEquals(8, conf.minsplitcount());
assertTrue(conf.inlinebucketsplitting());
cluster = parseCluster("<cluster id=\"storage\">\n" +
" <redundancy>2</redundancy>" +
" <documents/>" +
" <tuning>" +
" <distribution type=\"legacy\"/>" +
" </tuning>\n" +
" <group>" +
" <distribution partitions=\"1|*\"/>" +
" <group name=\"a\" distribution-key=\"0\">" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
" <group name=\"b\" distribution-key=\"1\">" +
" <node distribution-key=\"1\" hostalias=\"mockhost\"/>" +
" </group>" +
" </group>" +
"</cluster>");
cluster.getConfig(builder);
conf = new StorDistributormanagerConfig(builder);
assertEquals(1024, conf.splitcount());
assertEquals(512, conf.joincount());
assertEquals(33544432, conf.splitsize());
assertEquals(16000000, conf.joinsize());
assertEquals(1, conf.minsplitcount());
assertTrue(conf.inlinebucketsplitting());
}
@Test
public void testMaxMergesPerNode() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
DistributorCluster dcluster = parse("<content id=\"storage\">\n" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>");
((ContentCluster) dcluster.getParent()).getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(16, conf.maximum_nodes_per_merge());
builder = new StorDistributormanagerConfig.Builder();
dcluster = parse("<content id=\"storage\">\n" +
" <documents/>" +
" <tuning>\n" +
" <merges max-nodes-per-merge=\"4\"/>\n" +
" </tuning>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</content>");
((ContentCluster) dcluster.getParent()).getConfig(builder);
conf = new StorDistributormanagerConfig(builder);
assertEquals(4, conf.maximum_nodes_per_merge());
}
@Test
public void testGarbageCollectionSetExplicitly() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents garbage-collection=\"true\">\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(3600, conf.garbagecollection().interval());
assertEquals("not ((music))", conf.garbagecollection().selectiontoremove());
}
@Test
public void testGarbageCollectionInterval() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents garbage-collection=\"true\" garbage-collection-interval=\"30\">\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(30, conf.garbagecollection().interval());
}
@Test
public void testGarbageCollectionOffByDefault() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <documents>\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(0, conf.garbagecollection().interval());
assertEquals("", conf.garbagecollection().selectiontoremove());
}
@Test
public void testComplexGarbageCollectionSelectionForIndexedSearch() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"foo\">\n" +
" <documents garbage-collection=\"true\" selection=\"true\">" +
" <document type=\"music\" selection=\"music.year < now()\"/>\n" +
" <document type=\"movies\" selection=\"movies.year < now() - 1200\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(3600, conf.garbagecollection().interval());
assertEquals(
"not ((true) and ((music and (music.year < now())) or (movies and (movies.year < now() - 1200))))",
conf.garbagecollection().selectiontoremove());
}
@Test
public void testGarbageCollectionDisabledIfForced() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"foo\">\n" +
" <documents selection=\"true\" garbage-collection=\"false\" garbage-collection-interval=\"30\">\n" +
" <document type=\"music\" selection=\"music.year < now()\"/>\n" +
" <document type=\"movies\" selection=\"movies.year < now() - 1200\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(0, conf.garbagecollection().interval());
assertEquals("", conf.garbagecollection().selectiontoremove());
}
@Test
@Test
public void testCommunicationManagerDefaults() {
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
DistributorCluster cluster =
parse("<cluster id=\"storage\">" +
" <documents/>" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>");
cluster.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
assertTrue(config.mbus().dispatch_on_encode());
assertFalse(config.mbus().dispatch_on_decode());
assertEquals(4, config.mbus().num_threads());
assertEquals(StorCommunicationmanagerConfig.Mbus.Optimize_for.LATENCY, config.mbus().optimize_for());
}
private StorDistributormanagerConfig clusterXmlToConfig(String xml) {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse(xml).getConfig(builder);
return new StorDistributormanagerConfig(builder);
}
private String generateXmlForDocTypes(DocType... docTypes) {
return "<content id='storage'>\n" +
DocType.listToXml(docTypes) +
"\n</content>";
}
@Test
public void bucket_activation_disabled_if_no_documents_in_indexed_mode() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.storeOnly("music")));
assertTrue(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_with_single_indexed_document() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.index("music")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_with_multiple_indexed_documents() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.index("music"),
DocType.index("movies")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_enabled_if_at_least_one_document_indexed() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.storeOnly("music"),
DocType.streaming("bunnies"),
DocType.index("movies")));
assertFalse(config.disable_bucket_activation());
}
@Test
public void bucket_activation_disabled_for_single_streaming_type() {
StorDistributormanagerConfig config = clusterXmlToConfig(
generateXmlForDocTypes(DocType.streaming("music")));
assertTrue(config.disable_bucket_activation());
}
} |
```suggestion log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+token+"'"); ``` | private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents) {
log.log(Level.FINEST, "processToken '"+token+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
String oldToken = token;
token = stemmer.stem(token);
log.log(Level.FINEST, "stem '"+oldToken+"' to '"+token+"'");
}
log.log(Level.FINEST, "processed token is: "+token);
return token;
} | log.log(Level.FINEST, "stem '"+oldToken+"' to '"+token+"'"); | private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents) {
final String original = token;
log.log(Level.FINEST, () -> "processToken '"+original+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
final String oldToken = token;
token = stemmer.stem(token);
final String newToken = token;
log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+newToken+"'");
}
final String result = token;
log.log(Level.FINEST, () -> "processed token is: "+result);
return result;
} | class SimpleTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private final Normalizer normalizer;
private final Transformer transformer;
private final KStemmer stemmer = new KStemmer();
private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
public SimpleTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer) {
this(normalizer, new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
} | class SimpleTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private final Normalizer normalizer;
private final Transformer transformer;
private final KStemmer stemmer = new KStemmer();
private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
public SimpleTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer) {
this(normalizer, new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
} |
```suggestion log.log(Level.FINEST, () -> "processed token is: "+token); ``` | private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents) {
log.log(Level.FINEST, "processToken '"+token+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
String oldToken = token;
token = stemmer.stem(token);
log.log(Level.FINEST, "stem '"+oldToken+"' to '"+token+"'");
}
log.log(Level.FINEST, "processed token is: "+token);
return token;
} | log.log(Level.FINEST, "processed token is: "+token); | private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents) {
final String original = token;
log.log(Level.FINEST, () -> "processToken '"+original+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
final String oldToken = token;
token = stemmer.stem(token);
final String newToken = token;
log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+newToken+"'");
}
final String result = token;
log.log(Level.FINEST, () -> "processed token is: "+result);
return result;
} | class SimpleTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private final Normalizer normalizer;
private final Transformer transformer;
private final KStemmer stemmer = new KStemmer();
private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
public SimpleTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer) {
this(normalizer, new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
} | class SimpleTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
private final Normalizer normalizer;
private final Transformer transformer;
private final KStemmer stemmer = new KStemmer();
private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
public SimpleTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer) {
this(normalizer, new SimpleTransformer());
}
public SimpleTokenizer(Normalizer normalizer, Transformer transformer) {
this.normalizer = normalizer;
this.transformer = transformer;
}
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
TokenType prevType = SimpleTokenType.valueOf(nextCode);
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(prevType)
.setTokenString(token));
prev = next;
prevType = nextType;
}
next += Character.charCount(nextCode);
}
return tokens;
}
} |
Use "application" and "platform" instead or is this something else? | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var statusByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().keySet().stream()
.map(instance -> Map.entry(instance, status)))
.collect(toUnmodifiableMap(entry -> entry.getKey(), entry -> entry.getValue()));
var jobsByInstance = statusByInstance.entrySet().stream()
.collect(toUnmodifiableMap(entry -> entry.getKey(),
entry -> entry.getValue().instanceJobs().get(entry.getKey())));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
var status = statusByInstance.get(instance);
var jobsToRun = status.jobsToRun();
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setBool("upgrading", status.application().require(instance.instance()).change().platform().equals(Optional.of(statistics.version())));
status.instanceSteps().get(instance.instance()).blockedUntil(Change.of(statistics.version()))
.ifPresent(until -> instanceObject.setLong("blockedUntil", until.toEpochMilli()));
instanceObject.setString("upgradePolicy", toString(status.application().deploymentSpec().instance(instance.instance())
.map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor jobsArray = instanceObject.setArray("jobs");
status.jobSteps().forEach((job, jobStatus) -> {
if ( ! job.application().equals(instance)) return;
Cursor jobObject = jobsArray.addObject();
jobObject.setString("name", job.type().jobName());
jobStatus.pausedUntil().ifPresent(until -> jobObject.setLong("pausedUntil", until.toEpochMilli()));
jobStatus.coolingDownUntil(status.application().require(instance.instance()).change())
.ifPresent(until -> jobObject.setLong("coolingDownUntil", until.toEpochMilli()));
if (jobsToRun.containsKey(job)) {
jobObject.setString("pending", jobsToRun.get(job).stream()
.allMatch(versions -> versions.sourcePlatform()
.map(versions.targetPlatform()::equals)
.orElse(true))
? "revision" : "upgrade");
}
});
Cursor allRunsObject = instanceObject.setObject("allRuns");
Cursor upgradeRunsObject = instanceObject.setObject("upgradeRuns");
runs.forEach((type, rs) -> {
Cursor runObject = allRunsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeRunsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(runObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream()
.filter(job -> ! job.environment().isManuallyDeployed())
.map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | ? "revision" : "upgrade"); | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var statusByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().keySet().stream()
.map(instance -> Map.entry(instance, status)))
.collect(toUnmodifiableMap(entry -> entry.getKey(), entry -> entry.getValue()));
var jobsByInstance = statusByInstance.entrySet().stream()
.collect(toUnmodifiableMap(entry -> entry.getKey(),
entry -> entry.getValue().instanceJobs().get(entry.getKey())));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
var status = statusByInstance.get(instance);
var jobsToRun = status.jobsToRun();
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setBool("upgrading", status.application().require(instance.instance()).change().platform().equals(Optional.of(statistics.version())));
status.instanceSteps().get(instance.instance()).blockedUntil(Change.of(statistics.version()))
.ifPresent(until -> instanceObject.setLong("blockedUntil", until.toEpochMilli()));
instanceObject.setString("upgradePolicy", toString(status.application().deploymentSpec().instance(instance.instance())
.map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor jobsArray = instanceObject.setArray("jobs");
status.jobSteps().forEach((job, jobStatus) -> {
if ( ! job.application().equals(instance)) return;
Cursor jobObject = jobsArray.addObject();
jobObject.setString("name", job.type().jobName());
jobStatus.pausedUntil().ifPresent(until -> jobObject.setLong("pausedUntil", until.toEpochMilli()));
jobStatus.coolingDownUntil(status.application().require(instance.instance()).change())
.ifPresent(until -> jobObject.setLong("coolingDownUntil", until.toEpochMilli()));
if (jobsToRun.containsKey(job)) {
jobObject.setString("pending", jobsToRun.get(job).stream()
.allMatch(versions -> versions.sourcePlatform()
.map(versions.targetPlatform()::equals)
.orElse(true))
? "application" : "platform");
}
});
Cursor allRunsObject = instanceObject.setObject("allRuns");
Cursor upgradeRunsObject = instanceObject.setObject("upgradeRuns");
runs.forEach((type, rs) -> {
Cursor runObject = allRunsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeRunsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(runObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream()
.filter(job -> ! job.environment().isManuallyDeployed())
.map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} |
Hmm, ok. | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var statusByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().keySet().stream()
.map(instance -> Map.entry(instance, status)))
.collect(toUnmodifiableMap(entry -> entry.getKey(), entry -> entry.getValue()));
var jobsByInstance = statusByInstance.entrySet().stream()
.collect(toUnmodifiableMap(entry -> entry.getKey(),
entry -> entry.getValue().instanceJobs().get(entry.getKey())));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
var status = statusByInstance.get(instance);
var jobsToRun = status.jobsToRun();
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setBool("upgrading", status.application().require(instance.instance()).change().platform().equals(Optional.of(statistics.version())));
status.instanceSteps().get(instance.instance()).blockedUntil(Change.of(statistics.version()))
.ifPresent(until -> instanceObject.setLong("blockedUntil", until.toEpochMilli()));
instanceObject.setString("upgradePolicy", toString(status.application().deploymentSpec().instance(instance.instance())
.map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor jobsArray = instanceObject.setArray("jobs");
status.jobSteps().forEach((job, jobStatus) -> {
if ( ! job.application().equals(instance)) return;
Cursor jobObject = jobsArray.addObject();
jobObject.setString("name", job.type().jobName());
jobStatus.pausedUntil().ifPresent(until -> jobObject.setLong("pausedUntil", until.toEpochMilli()));
jobStatus.coolingDownUntil(status.application().require(instance.instance()).change())
.ifPresent(until -> jobObject.setLong("coolingDownUntil", until.toEpochMilli()));
if (jobsToRun.containsKey(job)) {
jobObject.setString("pending", jobsToRun.get(job).stream()
.allMatch(versions -> versions.sourcePlatform()
.map(versions.targetPlatform()::equals)
.orElse(true))
? "revision" : "upgrade");
}
});
Cursor allRunsObject = instanceObject.setObject("allRuns");
Cursor upgradeRunsObject = instanceObject.setObject("upgradeRuns");
runs.forEach((type, rs) -> {
Cursor runObject = allRunsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeRunsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(runObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream()
.filter(job -> ! job.environment().isManuallyDeployed())
.map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | ? "revision" : "upgrade"); | private HttpResponse root(HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
Cursor platformArray = root.setArray("versions");
var versionStatus = controller.versionStatus();
var systemVersion = versionStatus.systemVersion().map(VespaVersion::versionNumber).orElse(Vtag.currentVersion);
var deploymentStatuses = controller.jobController().deploymentStatuses(ApplicationList.from(controller.applications().asList()), systemVersion);
var deploymentStatistics = DeploymentStatistics.compute(versionStatus.versions().stream().map(VespaVersion::versionNumber).collect(toList()),
deploymentStatuses)
.stream().collect(toMap(DeploymentStatistics::version, identity()));
for (VespaVersion version : versionStatus.versions()) {
Cursor versionObject = platformArray.addObject();
versionObject.setString("version", version.versionNumber().toString());
versionObject.setString("confidence", version.confidence().name());
versionObject.setString("commit", version.releaseCommit());
versionObject.setLong("date", version.committedAt().toEpochMilli());
versionObject.setBool("controllerVersion", version.isControllerVersion());
versionObject.setBool("systemVersion", version.isSystemVersion());
Cursor configServerArray = versionObject.setArray("configServers");
for (HostName hostname : version.nodeVersions().hostnames()) {
Cursor configServerObject = configServerArray.addObject();
configServerObject.setString("hostname", hostname.value());
}
DeploymentStatistics statistics = deploymentStatistics.get(version.versionNumber());
Cursor failingArray = versionObject.setArray("failingApplications");
for (Run run : statistics.failingUpgrades()) {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("failing", run.id().type().jobName());
applicationObject.setString("status", run.status().name());
}
var statusByInstance = deploymentStatuses.asList().stream()
.flatMap(status -> status.instanceJobs().keySet().stream()
.map(instance -> Map.entry(instance, status)))
.collect(toUnmodifiableMap(entry -> entry.getKey(), entry -> entry.getValue()));
var jobsByInstance = statusByInstance.entrySet().stream()
.collect(toUnmodifiableMap(entry -> entry.getKey(),
entry -> entry.getValue().instanceJobs().get(entry.getKey())));
Cursor productionArray = versionObject.setArray("productionApplications");
statistics.productionSuccesses().stream()
.collect(groupingBy(run -> run.id().application()))
.forEach((id, runs) -> {
Cursor applicationObject = productionArray.addObject();
toSlime(applicationObject, id, request);
applicationObject.setLong("productionJobs", jobsByInstance.get(id).production().size());
applicationObject.setLong("productionSuccesses", runs.size());
});
Cursor runningArray = versionObject.setArray("deployingApplications");
for (Run run : statistics.runningUpgrade()) {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, run.id().application(), request);
applicationObject.setString("running", run.id().type().jobName());
}
class RunInfo {
final Run run;
final boolean upgrade;
RunInfo(Run run, boolean upgrade) { this.run = run; this.upgrade = upgrade; }
@Override public String toString() { return run.id().toString(); }
}
Cursor instancesArray = versionObject.setArray("applications");
Stream.of(statistics.failingUpgrades().stream().map(run -> new RunInfo(run, true)),
statistics.otherFailing().stream().map(run -> new RunInfo(run, false)),
statistics.runningUpgrade().stream().map(run -> new RunInfo(run, true)),
statistics.otherRunning().stream().map(run -> new RunInfo(run, false)),
statistics.productionSuccesses().stream().map(run -> new RunInfo(run, true)))
.flatMap(identity())
.collect(Collectors.groupingBy(run -> run.run.id().application(),
LinkedHashMap::new,
groupingBy(run -> run.run.id().type(),
LinkedHashMap::new,
toList())))
.forEach((instance, runs) -> {
var status = statusByInstance.get(instance);
var jobsToRun = status.jobsToRun();
Cursor instanceObject = instancesArray.addObject();
instanceObject.setString("tenant", instance.tenant().value());
instanceObject.setString("application", instance.application().value());
instanceObject.setString("instance", instance.instance().value());
instanceObject.setBool("upgrading", status.application().require(instance.instance()).change().platform().equals(Optional.of(statistics.version())));
status.instanceSteps().get(instance.instance()).blockedUntil(Change.of(statistics.version()))
.ifPresent(until -> instanceObject.setLong("blockedUntil", until.toEpochMilli()));
instanceObject.setString("upgradePolicy", toString(status.application().deploymentSpec().instance(instance.instance())
.map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
Cursor jobsArray = instanceObject.setArray("jobs");
status.jobSteps().forEach((job, jobStatus) -> {
if ( ! job.application().equals(instance)) return;
Cursor jobObject = jobsArray.addObject();
jobObject.setString("name", job.type().jobName());
jobStatus.pausedUntil().ifPresent(until -> jobObject.setLong("pausedUntil", until.toEpochMilli()));
jobStatus.coolingDownUntil(status.application().require(instance.instance()).change())
.ifPresent(until -> jobObject.setLong("coolingDownUntil", until.toEpochMilli()));
if (jobsToRun.containsKey(job)) {
jobObject.setString("pending", jobsToRun.get(job).stream()
.allMatch(versions -> versions.sourcePlatform()
.map(versions.targetPlatform()::equals)
.orElse(true))
? "application" : "platform");
}
});
Cursor allRunsObject = instanceObject.setObject("allRuns");
Cursor upgradeRunsObject = instanceObject.setObject("upgradeRuns");
runs.forEach((type, rs) -> {
Cursor runObject = allRunsObject.setObject(type.jobName());
Cursor upgradeObject = upgradeRunsObject.setObject(type.jobName());
for (RunInfo run : rs) {
toSlime(runObject, run.run);
if (run.upgrade)
toSlime(upgradeObject, run.run);
}
});
});
}
JobType.allIn(controller.system()).stream()
.filter(job -> ! job.environment().isManuallyDeployed())
.map(JobType::jobName).forEach(root.setArray("jobs")::addString);
return new SlimeJsonResponse(slime);
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} | class DeploymentApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
public DeploymentApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/deployment/v1/")) return root(request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,OPTIONS");
return response;
}
private void toSlime(Cursor jobObject, Run run) {
String key = run.hasFailed() ? "failing" : run.hasEnded() ? "success" : "running";
Cursor runObject = jobObject.setObject(key);
runObject.setLong("number", run.id().number());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", run.status().name());
}
private void toSlime(Cursor object, ApplicationId id, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", new Uri(request.getUri()).withPath("/application/v4/tenant/" +
id.tenant().value() +
"/application/" +
id.application().value()).toString());
object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(id))
.deploymentSpec().instance(id.instance()).map(DeploymentInstanceSpec::upgradePolicy)
.orElse(DeploymentSpec.UpgradePolicy.defaultPolicy)));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
if (upgradePolicy == DeploymentSpec.UpgradePolicy.defaultPolicy) {
return "default";
}
return upgradePolicy.name();
}
} |
The `container-storage` addition means more typing in tests... :( | public NodeAgentContextImpl build() {
return new NodeAgentContextImpl(
nodeSpecBuilder.build(),
Optional.ofNullable(acl).orElse(Acl.EMPTY),
Optional.ofNullable(identity).orElseGet(() -> new AthenzService("domain", "service")),
Optional.ofNullable(dockerNetworking).orElse(DockerNetworking.HOST_NETWORK),
Optional.ofNullable(zone).orElseGet(() -> new ZoneApi() {
@Override
public SystemName getSystemName() {
return SystemName.defaultSystem();
}
@Override
public ZoneId getId() {
return ZoneId.defaultId();
}
@Override
public CloudName getCloudName() {
return CloudName.defaultName();
}
@Override
public String getCloudNativeRegionName() {
return getId().region().value();
}
}),
fileSystem,
fileSystem.getPath("/home/docker/container-storage"),
fileSystem.getPath("/opt/vespa"),
Optional.ofNullable(vespaUser).orElse("vespa"),
Optional.ofNullable(vespaUserOnHost).orElse("container_vespa"),
cpuSpeedUp);
} | fileSystem.getPath("/home/docker/container-storage"), | public NodeAgentContextImpl build() {
return new NodeAgentContextImpl(
nodeSpecBuilder.build(),
Optional.ofNullable(acl).orElse(Acl.EMPTY),
Optional.ofNullable(identity).orElseGet(() -> new AthenzService("domain", "service")),
Optional.ofNullable(dockerNetworking).orElse(DockerNetworking.HOST_NETWORK),
Optional.ofNullable(zone).orElseGet(() -> new ZoneApi() {
@Override
public SystemName getSystemName() {
return SystemName.defaultSystem();
}
@Override
public ZoneId getId() {
return ZoneId.defaultId();
}
@Override
public CloudName getCloudName() {
return CloudName.defaultName();
}
@Override
public String getCloudNativeRegionName() {
return getId().region().value();
}
}),
fileSystem,
fileSystem.getPath("/home/docker/container-storage"),
fileSystem.getPath("/opt/vespa"),
Optional.ofNullable(vespaUser).orElse("vespa"),
Optional.ofNullable(vespaUserOnHost).orElse("container_vespa"),
cpuSpeedUp);
} | class Builder {
private NodeSpec.Builder nodeSpecBuilder;
private Acl acl;
private AthenzIdentity identity;
private DockerNetworking dockerNetworking;
private ZoneApi zone;
private String vespaUser;
private String vespaUserOnHost;
private FileSystem fileSystem = FileSystems.getDefault();
private double cpuSpeedUp = 1;
public Builder(NodeSpec node) {
this.nodeSpecBuilder = new NodeSpec.Builder(node);
}
/**
* Creates a NodeAgentContext.Builder with a NodeSpec that has the given hostname and some
* reasonable values for the remaining required NodeSpec fields. Use {@link
* if you want to control the entire NodeSpec.
*/
public Builder(String hostname) {
this.nodeSpecBuilder = NodeSpec.Builder.testSpec(hostname);
}
public Builder nodeSpecBuilder(Function<NodeSpec.Builder, NodeSpec.Builder> nodeSpecBuilderModifier) {
this.nodeSpecBuilder = nodeSpecBuilderModifier.apply(nodeSpecBuilder);
return this;
}
public Builder acl(Acl acl) {
this.acl = acl;
return this;
}
public Builder identity(AthenzIdentity identity) {
this.identity = identity;
return this;
}
public Builder dockerNetworking(DockerNetworking dockerNetworking) {
this.dockerNetworking = dockerNetworking;
return this;
}
public Builder zone(ZoneApi zone) {
this.zone = zone;
return this;
}
public Builder vespaUser(String vespaUser) {
this.vespaUser = vespaUser;
return this;
}
public Builder vespaUserOnHost(String vespaUserOnHost) {
this.vespaUserOnHost = vespaUserOnHost;
return this;
}
/** Sets the file system to use for paths. */
public Builder fileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public Builder cpuSpeedUp(double cpuSpeedUp) {
this.cpuSpeedUp = cpuSpeedUp;
return this;
}
} | class Builder {
private NodeSpec.Builder nodeSpecBuilder;
private Acl acl;
private AthenzIdentity identity;
private DockerNetworking dockerNetworking;
private ZoneApi zone;
private String vespaUser;
private String vespaUserOnHost;
private FileSystem fileSystem = FileSystems.getDefault();
private double cpuSpeedUp = 1;
public Builder(NodeSpec node) {
this.nodeSpecBuilder = new NodeSpec.Builder(node);
}
/**
* Creates a NodeAgentContext.Builder with a NodeSpec that has the given hostname and some
* reasonable values for the remaining required NodeSpec fields. Use {@link
* if you want to control the entire NodeSpec.
*/
public Builder(String hostname) {
this.nodeSpecBuilder = NodeSpec.Builder.testSpec(hostname);
}
public Builder nodeSpecBuilder(Function<NodeSpec.Builder, NodeSpec.Builder> nodeSpecBuilderModifier) {
this.nodeSpecBuilder = nodeSpecBuilderModifier.apply(nodeSpecBuilder);
return this;
}
public Builder acl(Acl acl) {
this.acl = acl;
return this;
}
public Builder identity(AthenzIdentity identity) {
this.identity = identity;
return this;
}
public Builder dockerNetworking(DockerNetworking dockerNetworking) {
this.dockerNetworking = dockerNetworking;
return this;
}
public Builder zone(ZoneApi zone) {
this.zone = zone;
return this;
}
public Builder vespaUser(String vespaUser) {
this.vespaUser = vespaUser;
return this;
}
public Builder vespaUserOnHost(String vespaUserOnHost) {
this.vespaUserOnHost = vespaUserOnHost;
return this;
}
/** Sets the file system to use for paths. */
public Builder fileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public Builder cpuSpeedUp(double cpuSpeedUp) {
this.cpuSpeedUp = cpuSpeedUp;
return this;
}
} |
yes, but it matches the expected path. I was hoping this could lead to simplifying the NodeAgentContextImpl constructor by removing 2 path parameters - but I ended up keeping them. | public NodeAgentContextImpl build() {
return new NodeAgentContextImpl(
nodeSpecBuilder.build(),
Optional.ofNullable(acl).orElse(Acl.EMPTY),
Optional.ofNullable(identity).orElseGet(() -> new AthenzService("domain", "service")),
Optional.ofNullable(dockerNetworking).orElse(DockerNetworking.HOST_NETWORK),
Optional.ofNullable(zone).orElseGet(() -> new ZoneApi() {
@Override
public SystemName getSystemName() {
return SystemName.defaultSystem();
}
@Override
public ZoneId getId() {
return ZoneId.defaultId();
}
@Override
public CloudName getCloudName() {
return CloudName.defaultName();
}
@Override
public String getCloudNativeRegionName() {
return getId().region().value();
}
}),
fileSystem,
fileSystem.getPath("/home/docker/container-storage"),
fileSystem.getPath("/opt/vespa"),
Optional.ofNullable(vespaUser).orElse("vespa"),
Optional.ofNullable(vespaUserOnHost).orElse("container_vespa"),
cpuSpeedUp);
} | fileSystem.getPath("/home/docker/container-storage"), | public NodeAgentContextImpl build() {
return new NodeAgentContextImpl(
nodeSpecBuilder.build(),
Optional.ofNullable(acl).orElse(Acl.EMPTY),
Optional.ofNullable(identity).orElseGet(() -> new AthenzService("domain", "service")),
Optional.ofNullable(dockerNetworking).orElse(DockerNetworking.HOST_NETWORK),
Optional.ofNullable(zone).orElseGet(() -> new ZoneApi() {
@Override
public SystemName getSystemName() {
return SystemName.defaultSystem();
}
@Override
public ZoneId getId() {
return ZoneId.defaultId();
}
@Override
public CloudName getCloudName() {
return CloudName.defaultName();
}
@Override
public String getCloudNativeRegionName() {
return getId().region().value();
}
}),
fileSystem,
fileSystem.getPath("/home/docker/container-storage"),
fileSystem.getPath("/opt/vespa"),
Optional.ofNullable(vespaUser).orElse("vespa"),
Optional.ofNullable(vespaUserOnHost).orElse("container_vespa"),
cpuSpeedUp);
} | class Builder {
private NodeSpec.Builder nodeSpecBuilder;
private Acl acl;
private AthenzIdentity identity;
private DockerNetworking dockerNetworking;
private ZoneApi zone;
private String vespaUser;
private String vespaUserOnHost;
private FileSystem fileSystem = FileSystems.getDefault();
private double cpuSpeedUp = 1;
public Builder(NodeSpec node) {
this.nodeSpecBuilder = new NodeSpec.Builder(node);
}
/**
* Creates a NodeAgentContext.Builder with a NodeSpec that has the given hostname and some
* reasonable values for the remaining required NodeSpec fields. Use {@link
* if you want to control the entire NodeSpec.
*/
public Builder(String hostname) {
this.nodeSpecBuilder = NodeSpec.Builder.testSpec(hostname);
}
public Builder nodeSpecBuilder(Function<NodeSpec.Builder, NodeSpec.Builder> nodeSpecBuilderModifier) {
this.nodeSpecBuilder = nodeSpecBuilderModifier.apply(nodeSpecBuilder);
return this;
}
public Builder acl(Acl acl) {
this.acl = acl;
return this;
}
public Builder identity(AthenzIdentity identity) {
this.identity = identity;
return this;
}
public Builder dockerNetworking(DockerNetworking dockerNetworking) {
this.dockerNetworking = dockerNetworking;
return this;
}
public Builder zone(ZoneApi zone) {
this.zone = zone;
return this;
}
public Builder vespaUser(String vespaUser) {
this.vespaUser = vespaUser;
return this;
}
public Builder vespaUserOnHost(String vespaUserOnHost) {
this.vespaUserOnHost = vespaUserOnHost;
return this;
}
/** Sets the file system to use for paths. */
public Builder fileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public Builder cpuSpeedUp(double cpuSpeedUp) {
this.cpuSpeedUp = cpuSpeedUp;
return this;
}
} | class Builder {
private NodeSpec.Builder nodeSpecBuilder;
private Acl acl;
private AthenzIdentity identity;
private DockerNetworking dockerNetworking;
private ZoneApi zone;
private String vespaUser;
private String vespaUserOnHost;
private FileSystem fileSystem = FileSystems.getDefault();
private double cpuSpeedUp = 1;
public Builder(NodeSpec node) {
this.nodeSpecBuilder = new NodeSpec.Builder(node);
}
/**
* Creates a NodeAgentContext.Builder with a NodeSpec that has the given hostname and some
* reasonable values for the remaining required NodeSpec fields. Use {@link
* if you want to control the entire NodeSpec.
*/
public Builder(String hostname) {
this.nodeSpecBuilder = NodeSpec.Builder.testSpec(hostname);
}
public Builder nodeSpecBuilder(Function<NodeSpec.Builder, NodeSpec.Builder> nodeSpecBuilderModifier) {
this.nodeSpecBuilder = nodeSpecBuilderModifier.apply(nodeSpecBuilder);
return this;
}
public Builder acl(Acl acl) {
this.acl = acl;
return this;
}
public Builder identity(AthenzIdentity identity) {
this.identity = identity;
return this;
}
public Builder dockerNetworking(DockerNetworking dockerNetworking) {
this.dockerNetworking = dockerNetworking;
return this;
}
public Builder zone(ZoneApi zone) {
this.zone = zone;
return this;
}
public Builder vespaUser(String vespaUser) {
this.vespaUser = vespaUser;
return this;
}
public Builder vespaUserOnHost(String vespaUserOnHost) {
this.vespaUserOnHost = vespaUserOnHost;
return this;
}
/** Sets the file system to use for paths. */
public Builder fileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public Builder cpuSpeedUp(double cpuSpeedUp) {
this.cpuSpeedUp = cpuSpeedUp;
return this;
}
} |
Consider `toUnmodifiableMap` to make it immutable. | public Application(ApplicationId id, Collection<Cluster> clusters) {
this.id = id;
this.clusters = clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c));
} | this.clusters = clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)); | public Application(ApplicationId id, Collection<Cluster> clusters) {
this.id = id;
this.clusters = clusters.stream().collect(Collectors.toUnmodifiableMap(c -> c.id(), c -> c));
} | class Application {
private ApplicationId id;
private Map<ClusterSpec.Id, Cluster> clusters;
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
@Override
public String toString() {
return "application '" + id + "'";
}
} | class Application {
private ApplicationId id;
private Map<ClusterSpec.Id, Cluster> clusters;
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
@Override
public String toString() {
return "application '" + id + "'";
}
} |
Done, thanks | public Application(ApplicationId id, Collection<Cluster> clusters) {
this.id = id;
this.clusters = clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c));
} | this.clusters = clusters.stream().collect(Collectors.toMap(c -> c.id(), c -> c)); | public Application(ApplicationId id, Collection<Cluster> clusters) {
this.id = id;
this.clusters = clusters.stream().collect(Collectors.toUnmodifiableMap(c -> c.id(), c -> c));
} | class Application {
private ApplicationId id;
private Map<ClusterSpec.Id, Cluster> clusters;
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
@Override
public String toString() {
return "application '" + id + "'";
}
} | class Application {
private ApplicationId id;
private Map<ClusterSpec.Id, Cluster> clusters;
public ApplicationId id() { return id; }
public Map<ClusterSpec.Id, Cluster> clusters() { return clusters; }
@Override
public String toString() {
return "application '" + id + "'";
}
} |
```suggestion return host.resources().compatibleWith(requestedNodes.resources().get()); ``` order matters for remote disk | private boolean fitsPerfectly(Node host) {
return requestedNodes.resources().get().compatibleWith(host.resources());
} | return requestedNodes.resources().get().compatibleWith(host.resources()); | private boolean fitsPerfectly(Node host) {
return host.resources().compatibleWith(requestedNodes.resources().get());
} | class NodePrioritizer {
private final List<NodeCandidate> candidates = new ArrayList<>();
private final LockedNodeList allNodes;
private final HostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId application;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final Nodes nodes;
private final boolean dynamicProvisioning;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
private final Set<Node> spareHosts;
private final boolean enclave;
public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
this.allNodes = allNodes;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.application = application;
this.dynamicProvisioning = dynamicProvisioning;
this.spareHosts = dynamicProvisioning ?
capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
capacity.findSpareHosts(this.allNodes.asList(), spareCount);
this.nameResolver = nameResolver;
this.nodes = nodes;
this.enclave = enclave;
NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
.flatMap(node -> node.allocation()
.flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
.stream())
.distinct()
.count();
this.topologyChange = currentGroups != wantedGroups;
this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
.map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
.filter(clusterSpec.group()::equals)
.count();
this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
addApplicationNodes();
addSurplusNodes(surplusActiveNodes);
addReadyNodes();
addCandidatesOnExistingHosts();
return prioritize();
}
/** Returns the list of nodes sorted by {@link NodeCandidate
private List<NodeCandidate> prioritize() {
Map<String, List<NodeCandidate>> candidatesBySwitch = this.candidates.stream()
.collect(Collectors.groupingBy(candidate -> candidate.parent.orElseGet(candidate::toNode)
.switchHostname()
.orElse("")));
List<NodeCandidate> nodes = new ArrayList<>(this.candidates.size());
for (var clusterSwitch : candidatesBySwitch.keySet()) {
List<NodeCandidate> switchCandidates = candidatesBySwitch.get(clusterSwitch);
if (clusterSwitch.isEmpty()) {
nodes.addAll(switchCandidates);
} else {
Collections.sort(switchCandidates);
NodeCandidate bestNode = switchCandidates.get(0);
nodes.add(bestNode);
for (var node : switchCandidates.subList(1, switchCandidates.size())) {
nodes.add(node.withExclusiveSwitch(false));
}
}
}
Collections.sort(nodes);
return nodes;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
private void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
NodeCandidate candidate = candidateFrom(node, true);
if (!candidate.violatesSpares || canAllocateToSpareHosts) {
candidates.add(candidate);
}
}
}
/** Add a node on each host with enough capacity for the requested flavor */
private void addCandidatesOnExistingHosts() {
if (requestedNodes.resources().isEmpty()) return;
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
if (nodes.suspended(host)) continue;
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
candidates.add(NodeCandidate.createNewChild(requestedNodes.resources().get(),
capacity.availableCapacityOf(host),
host,
spareHosts.contains(host),
allNodes,
nameResolver,
!enclave));
}
}
/** Add existing nodes allocated to the application */
private void addApplicationNodes() {
EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type() == requestedNodes.type())
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(application))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.filter(node -> node.state() == Node.State.active || canStillAllocate(node))
.map(node -> candidateFrom(node, false))
.forEach(candidates::add);
}
/** Add nodes already provisioned, but not allocated to any application */
private void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type() == requestedNodes.type())
.filter(node -> node.state() == Node.State.ready)
.map(node -> candidateFrom(node, false))
.filter(n -> !n.violatesSpares || canAllocateToSpareHosts)
.forEach(candidates::add);
}
/** Create a candidate from given pre-existing node */
private NodeCandidate candidateFrom(Node node, boolean isSurplus) {
Optional<Node> optionalParent = allNodes.parentOf(node);
if (optionalParent.isPresent()) {
Node parent = optionalParent.get();
return NodeCandidate.createChild(node,
capacity.availableCapacityOf(parent),
parent,
spareHosts.contains(parent),
isSurplus,
false,
parent.exclusiveToApplicationId().isEmpty()
&& requestedNodes.canResize(node.resources(),
capacity.unusedCapacityOf(parent),
clusterSpec.type(),
topologyChange,
currentClusterSize));
} else {
return NodeCandidate.createStandalone(node, isSurplus, false);
}
}
/** Returns whether we are allocating to replace a failed node */
private boolean isReplacement(NodeList nodesInCluster, Optional<ClusterSpec.Group> group) {
NodeList nodesInGroup = group.map(ClusterSpec.Group::index)
.map(nodesInCluster::group)
.orElse(nodesInCluster);
int failedNodesInGroup = nodesInGroup.failing().size() + nodesInGroup.state(Node.State.failed).size();
if (failedNodesInGroup == 0) return false;
return ! requestedNodes.fulfilledBy(nodesInGroup.size() - failedNodesInGroup);
}
/**
* We may regret that a non-active node is allocated to a host and not offer it to the application
* now, e.g if we want to retire the host.
*
* @return true if we still want to allocate the given node to its parent
*/
private boolean canStillAllocate(Node node) {
if (node.type() != NodeType.tenant || node.parentHostname().isEmpty()) return true;
Optional<Node> parent = allNodes.parentOf(node);
return parent.isPresent() && nodes.canAllocateTenantNodeTo(parent.get(), dynamicProvisioning);
}
} | class NodePrioritizer {
private final List<NodeCandidate> candidates = new ArrayList<>();
private final LockedNodeList allNodes;
private final HostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId application;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final Nodes nodes;
private final boolean dynamicProvisioning;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
private final Set<Node> spareHosts;
private final boolean enclave;
public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
this.allNodes = allNodes;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.application = application;
this.dynamicProvisioning = dynamicProvisioning;
this.spareHosts = dynamicProvisioning ?
capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
capacity.findSpareHosts(this.allNodes.asList(), spareCount);
this.nameResolver = nameResolver;
this.nodes = nodes;
this.enclave = enclave;
NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
.flatMap(node -> node.allocation()
.flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
.stream())
.distinct()
.count();
this.topologyChange = currentGroups != wantedGroups;
this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
.map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
.filter(clusterSpec.group()::equals)
.count();
this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
addApplicationNodes();
addSurplusNodes(surplusActiveNodes);
addReadyNodes();
addCandidatesOnExistingHosts();
return prioritize();
}
/** Returns the list of nodes sorted by {@link NodeCandidate
private List<NodeCandidate> prioritize() {
Map<String, List<NodeCandidate>> candidatesBySwitch = this.candidates.stream()
.collect(Collectors.groupingBy(candidate -> candidate.parent.orElseGet(candidate::toNode)
.switchHostname()
.orElse("")));
List<NodeCandidate> nodes = new ArrayList<>(this.candidates.size());
for (var clusterSwitch : candidatesBySwitch.keySet()) {
List<NodeCandidate> switchCandidates = candidatesBySwitch.get(clusterSwitch);
if (clusterSwitch.isEmpty()) {
nodes.addAll(switchCandidates);
} else {
Collections.sort(switchCandidates);
NodeCandidate bestNode = switchCandidates.get(0);
nodes.add(bestNode);
for (var node : switchCandidates.subList(1, switchCandidates.size())) {
nodes.add(node.withExclusiveSwitch(false));
}
}
}
Collections.sort(nodes);
return nodes;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
private void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
NodeCandidate candidate = candidateFrom(node, true);
if (!candidate.violatesSpares || canAllocateToSpareHosts) {
candidates.add(candidate);
}
}
}
/** Add a node on each host with enough capacity for the requested flavor */
private void addCandidatesOnExistingHosts() {
if (requestedNodes.resources().isEmpty()) return;
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
if (nodes.suspended(host)) continue;
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
candidates.add(NodeCandidate.createNewChild(requestedNodes.resources().get(),
capacity.availableCapacityOf(host),
host,
spareHosts.contains(host),
allNodes,
nameResolver,
!enclave));
}
}
/** Add existing nodes allocated to the application */
private void addApplicationNodes() {
EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type() == requestedNodes.type())
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(application))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.filter(node -> node.state() == Node.State.active || canStillAllocate(node))
.map(node -> candidateFrom(node, false))
.forEach(candidates::add);
}
/** Add nodes already provisioned, but not allocated to any application */
private void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type() == requestedNodes.type())
.filter(node -> node.state() == Node.State.ready)
.map(node -> candidateFrom(node, false))
.filter(n -> !n.violatesSpares || canAllocateToSpareHosts)
.forEach(candidates::add);
}
/** Create a candidate from given pre-existing node */
private NodeCandidate candidateFrom(Node node, boolean isSurplus) {
Optional<Node> optionalParent = allNodes.parentOf(node);
if (optionalParent.isPresent()) {
Node parent = optionalParent.get();
return NodeCandidate.createChild(node,
capacity.availableCapacityOf(parent),
parent,
spareHosts.contains(parent),
isSurplus,
false,
parent.exclusiveToApplicationId().isEmpty()
&& requestedNodes.canResize(node.resources(),
capacity.unusedCapacityOf(parent),
clusterSpec.type(),
topologyChange,
currentClusterSize));
} else {
return NodeCandidate.createStandalone(node, isSurplus, false);
}
}
/** Returns whether we are allocating to replace a failed node */
private boolean isReplacement(NodeList nodesInCluster, Optional<ClusterSpec.Group> group) {
NodeList nodesInGroup = group.map(ClusterSpec.Group::index)
.map(nodesInCluster::group)
.orElse(nodesInCluster);
int failedNodesInGroup = nodesInGroup.failing().size() + nodesInGroup.state(Node.State.failed).size();
if (failedNodesInGroup == 0) return false;
return ! requestedNodes.fulfilledBy(nodesInGroup.size() - failedNodesInGroup);
}
/**
* We may regret that a non-active node is allocated to a host and not offer it to the application
* now, e.g if we want to retire the host.
*
* @return true if we still want to allocate the given node to its parent
*/
private boolean canStillAllocate(Node node) {
if (node.type() != NodeType.tenant || node.parentHostname().isEmpty()) return true;
Optional<Node> parent = allNodes.parentOf(node);
return parent.isPresent() && nodes.canAllocateTenantNodeTo(parent.get(), dynamicProvisioning);
}
} |
Ah, thanks, that's why it was like it was in the first place! | public void unregisterSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.computeIfPresent(session, (name, owners) -> {
if (owners.equals(List.of(owner))) {
if (broadcast)
net.unregisterSession(session);
return null;
}
owners.remove(owner);
return owners;
});
} | if (owners.equals(List.of(owner))) { | public void unregisterSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.computeIfPresent(session, (name, owners) -> {
if (owners.size() == 1 && owners.contains(owner)) {
if (broadcast)
net.unregisterSession(session);
return null;
}
owners.remove(owner);
return owners;
});
} | class NetworkMultiplexer implements NetworkOwner {
private static final Logger log = Logger.getLogger(NetworkMultiplexer.class.getName());
private final Network net;
private final Deque<NetworkOwner> owners = new ConcurrentLinkedDeque<>();
private final Map<String, Deque<NetworkOwner>> sessions = new ConcurrentHashMap<>();
private final AtomicBoolean disowned;
private NetworkMultiplexer(Network net, boolean shared) {
net.attach(this);
this.net = net;
this.disowned = new AtomicBoolean( ! shared);
}
/** Returns a network multiplexer which will be shared between several {@link NetworkOwner}s,
* and will shut down when all these have detached, and {@link
public static NetworkMultiplexer shared(Network net) {
return new NetworkMultiplexer(net, true);
}
/** Returns a network multiplexer with a single {@link NetworkOwner}, which shuts down when this owner detaches. */
public static NetworkMultiplexer dedicated(Network net) {
return new NetworkMultiplexer(net, false);
}
public void registerSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.compute(session, (name, owners) -> {
if (owners == null) {
owners = new ConcurrentLinkedDeque<>();
if (broadcast)
net.registerSession(session);
}
else if (owners.contains(owner))
throw new IllegalArgumentException("Session '" + session + "' with owner '" + owner + "' already registered with " + this);
owners.push(owner);
return owners;
});
}
@Override
public Protocol getProtocol(Utf8Array name) {
Protocol protocol = null;
for (NetworkOwner owner : owners)
protocol = owner.getProtocol(name) == null ? protocol : owner.getProtocol(name);
return protocol;
}
@Override
public void deliverMessage(Message message, String session) {
NetworkOwner owner = sessions.getOrDefault(session, owners).peek();
if (owner == null) {
log.warning(this + " received message '" + message + "' with no owners attached");
message.discard();
}
else
owner.deliverMessage(message, session);
}
/** Attach the network owner to this, allowing this to forward messages to it. */
public void attach(NetworkOwner owner) {
if (owners.contains(owner))
throw new IllegalArgumentException(owner + " is already attached to " + this);
owners.add(owner);
}
/** Detach the network owner from this, no longer allowing messages to it, and shutting down this is ownerless. */
public void detach(NetworkOwner owner) {
if ( ! owners.remove(owner))
throw new IllegalArgumentException(owner + " not attached to " + this);
destroyIfOwnerless();
}
/** Signal that external ownership of this is relinquished, allowing destruction on last owner detachment. */
public void disown() {
if (disowned.getAndSet(true))
throw new IllegalStateException("Destroy called on a dedicated multiplexer--" +
"this automatically shuts down when detached from--or " +
"called multiple times on a shared multiplexer");
destroyIfOwnerless();
}
private void destroyIfOwnerless() {
if (disowned.get() && owners.isEmpty())
net.shutdown();
}
public Network net() {
return net;
}
@Override
public String toString() {
return "network multiplexer with owners: " + owners + ", sessions: " + sessions + " and destructible: " + disowned.get();
}
} | class NetworkMultiplexer implements NetworkOwner {
private static final Logger log = Logger.getLogger(NetworkMultiplexer.class.getName());
private final Network net;
private final Deque<NetworkOwner> owners = new ConcurrentLinkedDeque<>();
private final Map<String, Deque<NetworkOwner>> sessions = new ConcurrentHashMap<>();
private final AtomicBoolean disowned;
private NetworkMultiplexer(Network net, boolean shared) {
net.attach(this);
this.net = net;
this.disowned = new AtomicBoolean( ! shared);
}
/** Returns a network multiplexer which will be shared between several {@link NetworkOwner}s,
* and will shut down when all these have detached, and {@link
public static NetworkMultiplexer shared(Network net) {
return new NetworkMultiplexer(net, true);
}
/** Returns a network multiplexer with a single {@link NetworkOwner}, which shuts down when this owner detaches. */
public static NetworkMultiplexer dedicated(Network net) {
return new NetworkMultiplexer(net, false);
}
public void registerSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.compute(session, (name, owners) -> {
if (owners == null) {
owners = new ConcurrentLinkedDeque<>();
if (broadcast)
net.registerSession(session);
}
else if (owners.contains(owner))
throw new IllegalArgumentException("Session '" + session + "' with owner '" + owner + "' already registered with " + this);
owners.push(owner);
return owners;
});
}
@Override
public Protocol getProtocol(Utf8Array name) {
Protocol protocol = null;
for (NetworkOwner owner : owners)
protocol = owner.getProtocol(name) == null ? protocol : owner.getProtocol(name);
return protocol;
}
@Override
public void deliverMessage(Message message, String session) {
NetworkOwner owner = sessions.getOrDefault(session, owners).peek();
if (owner == null) {
log.warning(this + " received message '" + message + "' with no owners attached");
message.discard();
}
else
owner.deliverMessage(message, session);
}
/** Attach the network owner to this, allowing this to forward messages to it. */
public void attach(NetworkOwner owner) {
if (owners.contains(owner))
throw new IllegalArgumentException(owner + " is already attached to " + this);
owners.add(owner);
}
/** Detach the network owner from this, no longer allowing messages to it, and shutting down this is ownerless. */
public void detach(NetworkOwner owner) {
if ( ! owners.remove(owner))
throw new IllegalArgumentException(owner + " not attached to " + this);
destroyIfOwnerless();
}
/** Signal that external ownership of this is relinquished, allowing destruction on last owner detachment. */
public void disown() {
if (disowned.getAndSet(true))
throw new IllegalStateException("Destroy called on a dedicated multiplexer--" +
"this automatically shuts down when detached from--or " +
"called multiple times on a shared multiplexer");
destroyIfOwnerless();
}
private void destroyIfOwnerless() {
if (disowned.get() && owners.isEmpty())
net.shutdown();
}
public Network net() {
return net;
}
@Override
public String toString() {
return "network multiplexer with owners: " + owners + ", sessions: " + sessions + " and destructible: " + disowned.get();
}
} |
What does the empty string mean? | void distributeApplicationPackage() {
FileRegistry fileRegistry = fileDistributionProvider.getFileRegistry();
FileReference fileReference = fileRegistry.addFile("");
FileDistribution fileDistribution = fileDistributionProvider.getFileDistribution();
log.log(Level.INFO, "Distribute application package for " + applicationId + " (" + fileReference + ") to other config servers");
properties.configServerSpecs().stream()
.filter(spec -> ! spec.getHostName().equals(fileRegistry.fileSourceHost()))
.forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference)));
} | FileReference fileReference = fileRegistry.addFile(""); | void distributeApplicationPackage() {
if ( ! distributeApplicationPackage.value()) return;
FileRegistry fileRegistry = fileDistributionProvider.getFileRegistry();
FileReference fileReference = fileRegistry.addFile("");
FileDistribution fileDistribution = fileDistributionProvider.getFileDistribution();
log.log(Level.INFO, "Distribute application package for " + applicationId + " (" + fileReference + ") to other config servers");
properties.configServerSpecs().stream()
.filter(spec -> ! spec.getHostName().equals(fileRegistry.fileSourceHost()))
.forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference)));
} | class Preparation {
final SessionContext context;
final DeployLogger logger;
final PrepareParams params;
final Optional<ApplicationSet> currentActiveApplicationSet;
final Path tenantPath;
final ApplicationId applicationId;
/** The repository part of docker image to be used for this deployment */
final Optional<DockerImage> dockerImageRepository;
/** The version of Vespa the application to be prepared specifies for its nodes */
final Version vespaVersion;
final ContainerEndpointsCache containerEndpoints;
final Set<ContainerEndpoint> endpointsSet;
final ModelContext.Properties properties;
private final EndpointCertificateMetadataStore endpointCertificateMetadataStore;
private final EndpointCertificateRetriever endpointCertificateRetriever;
private final Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets;
private final Optional<AthenzDomain> athenzDomain;
private ApplicationPackage applicationPackage;
private List<PreparedModelsBuilder.PreparedModelResult> modelResultList;
private PrepareResult prepareResult;
private final PreparedModelsBuilder preparedModelsBuilder;
private final FileDistributionProvider fileDistributionProvider;
Preparation(SessionContext context, DeployLogger logger, PrepareParams params,
Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) {
this.context = context;
this.logger = logger;
this.params = params;
this.currentActiveApplicationSet = currentActiveApplicationSet;
this.tenantPath = tenantPath;
this.applicationId = params.getApplicationId();
this.dockerImageRepository = params.dockerImageRepository();
this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion);
this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.endpointCertificateMetadataStore = new EndpointCertificateMetadataStore(curator, tenantPath);
this.endpointCertificateRetriever = new EndpointCertificateRetriever(secretStore);
this.endpointCertificateMetadata = params.endpointCertificateMetadata()
.or(() -> params.tlsSecretsKeyName().map(EndpointCertificateMetadataSerializer::fromString));
endpointCertificateSecrets = endpointCertificateMetadata
.or(() -> endpointCertificateMetadataStore.readEndpointCertificateMetadata(applicationId))
.flatMap(endpointCertificateRetriever::readEndpointCertificateSecrets);
this.endpointsSet = getEndpoints(params.containerEndpoints());
this.athenzDomain = params.athenzDomain();
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
ConfigServerSpec.fromConfig(configserverConfig),
HostName.from(configserverConfig.loadBalancerAddress()),
configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null,
configserverConfig.athenzDnsSuffix(),
configserverConfig.hostedVespa(),
zone,
endpointsSet,
params.isBootstrap(),
currentActiveApplicationSet.isEmpty(),
context.getFlagSource(),
endpointCertificateSecrets,
athenzDomain);
this.fileDistributionProvider = fileDistributionFactory.createProvider(context.getServerDBSessionDir());
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
fileDistributionProvider,
hostProvisionerProvider,
context,
logger,
params,
currentActiveApplicationSet,
properties,
configserverConfig);
}
void checkTimeout(String step) {
if (! params.getTimeoutBudget().hasTimeLeft()) {
String used = params.getTimeoutBudget().timesUsed();
throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId);
}
}
void preprocess() {
try {
this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger);
} catch (IOException | TransformerException | ParserConfigurationException | SAXException e) {
throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e);
}
checkTimeout("preprocess");
}
AllocatedHosts buildModels(Instant now) {
SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>();
this.modelResultList = preparedModelsBuilder.buildModels(applicationId, dockerImageRepository, vespaVersion,
applicationPackage, allocatedHosts, now);
checkTimeout("build models");
return allocatedHosts.get();
}
void makeResult(AllocatedHosts allocatedHosts) {
this.prepareResult = new PrepareResult(allocatedHosts, modelResultList);
checkTimeout("making result from models");
}
void writeStateZK() {
log.log(Level.FINE, "Writing application package state to zookeeper");
writeStateToZooKeeper(context.getSessionZooKeeperClient(),
applicationPackage,
applicationId,
dockerImageRepository,
vespaVersion,
logger,
prepareResult.getFileRegistries(),
prepareResult.allocatedHosts(),
athenzDomain);
checkTimeout("write state to zookeeper");
}
void writeEndpointCertificateMetadataZK() {
endpointCertificateMetadata.ifPresent(metadata ->
endpointCertificateMetadataStore.writeEndpointCertificateMetadata(applicationId, metadata));
checkTimeout("write endpoint certificate metadata to zookeeper");
}
void writeContainerEndpointsZK() {
if (!params.containerEndpoints().isEmpty()) {
containerEndpoints.write(applicationId, params.containerEndpoints());
}
checkTimeout("write container endpoints to zookeeper");
}
void distribute() {
prepareResult.asList().forEach(modelResult -> modelResult.model
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution()));
checkTimeout("distribute files");
}
ConfigChangeActions result() {
return prepareResult.getConfigChangeActions();
}
private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) {
if (endpoints == null || endpoints.isEmpty()) {
endpoints = this.containerEndpoints.read(applicationId);
}
return ImmutableSet.copyOf(endpoints);
}
} | class Preparation {
final SessionContext context;
final DeployLogger logger;
final PrepareParams params;
final Optional<ApplicationSet> currentActiveApplicationSet;
final Path tenantPath;
final ApplicationId applicationId;
/** The repository part of docker image to be used for this deployment */
final Optional<DockerImage> dockerImageRepository;
/** The version of Vespa the application to be prepared specifies for its nodes */
final Version vespaVersion;
final ContainerEndpointsCache containerEndpoints;
final Set<ContainerEndpoint> endpointsSet;
final ModelContext.Properties properties;
private final EndpointCertificateMetadataStore endpointCertificateMetadataStore;
private final EndpointCertificateRetriever endpointCertificateRetriever;
private final Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets;
private final Optional<AthenzDomain> athenzDomain;
private ApplicationPackage applicationPackage;
private List<PreparedModelsBuilder.PreparedModelResult> modelResultList;
private PrepareResult prepareResult;
private final PreparedModelsBuilder preparedModelsBuilder;
private final FileDistributionProvider fileDistributionProvider;
Preparation(SessionContext context, DeployLogger logger, PrepareParams params,
Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) {
this.context = context;
this.logger = logger;
this.params = params;
this.currentActiveApplicationSet = currentActiveApplicationSet;
this.tenantPath = tenantPath;
this.applicationId = params.getApplicationId();
this.dockerImageRepository = params.dockerImageRepository();
this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion);
this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.endpointCertificateMetadataStore = new EndpointCertificateMetadataStore(curator, tenantPath);
this.endpointCertificateRetriever = new EndpointCertificateRetriever(secretStore);
this.endpointCertificateMetadata = params.endpointCertificateMetadata()
.or(() -> params.tlsSecretsKeyName().map(EndpointCertificateMetadataSerializer::fromString));
endpointCertificateSecrets = endpointCertificateMetadata
.or(() -> endpointCertificateMetadataStore.readEndpointCertificateMetadata(applicationId))
.flatMap(endpointCertificateRetriever::readEndpointCertificateSecrets);
this.endpointsSet = getEndpoints(params.containerEndpoints());
this.athenzDomain = params.athenzDomain();
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
ConfigServerSpec.fromConfig(configserverConfig),
HostName.from(configserverConfig.loadBalancerAddress()),
configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null,
configserverConfig.athenzDnsSuffix(),
configserverConfig.hostedVespa(),
zone,
endpointsSet,
params.isBootstrap(),
currentActiveApplicationSet.isEmpty(),
context.getFlagSource(),
endpointCertificateSecrets,
athenzDomain);
this.fileDistributionProvider = fileDistributionFactory.createProvider(context.getServerDBSessionDir());
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
fileDistributionProvider,
hostProvisionerProvider,
context,
logger,
params,
currentActiveApplicationSet,
properties,
configserverConfig);
}
void checkTimeout(String step) {
if (! params.getTimeoutBudget().hasTimeLeft()) {
String used = params.getTimeoutBudget().timesUsed();
throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId);
}
}
void preprocess() {
try {
this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger);
} catch (IOException | TransformerException | ParserConfigurationException | SAXException e) {
throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e);
}
checkTimeout("preprocess");
}
AllocatedHosts buildModels(Instant now) {
SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>();
this.modelResultList = preparedModelsBuilder.buildModels(applicationId, dockerImageRepository, vespaVersion,
applicationPackage, allocatedHosts, now);
checkTimeout("build models");
return allocatedHosts.get();
}
void makeResult(AllocatedHosts allocatedHosts) {
this.prepareResult = new PrepareResult(allocatedHosts, modelResultList);
checkTimeout("making result from models");
}
void writeStateZK() {
log.log(Level.FINE, "Writing application package state to zookeeper");
writeStateToZooKeeper(context.getSessionZooKeeperClient(),
applicationPackage,
applicationId,
dockerImageRepository,
vespaVersion,
logger,
prepareResult.getFileRegistries(),
prepareResult.allocatedHosts(),
athenzDomain);
checkTimeout("write state to zookeeper");
}
void writeEndpointCertificateMetadataZK() {
endpointCertificateMetadata.ifPresent(metadata ->
endpointCertificateMetadataStore.writeEndpointCertificateMetadata(applicationId, metadata));
checkTimeout("write endpoint certificate metadata to zookeeper");
}
void writeContainerEndpointsZK() {
if (!params.containerEndpoints().isEmpty()) {
containerEndpoints.write(applicationId, params.containerEndpoints());
}
checkTimeout("write container endpoints to zookeeper");
}
void distribute() {
prepareResult.asList().forEach(modelResult -> modelResult.model
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution()));
checkTimeout("distribute files");
}
ConfigChangeActions result() {
return prepareResult.getConfigChangeActions();
}
private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) {
if (endpoints == null || endpoints.isEmpty()) {
endpoints = this.containerEndpoints.read(applicationId);
}
return ImmutableSet.copyOf(endpoints);
}
} |
It's the relative path in the application package, so it basically means add "add everything in the application package" directory. Should probably add a new method to make this clearer, I'll look into it in the next PR. | void distributeApplicationPackage() {
FileRegistry fileRegistry = fileDistributionProvider.getFileRegistry();
FileReference fileReference = fileRegistry.addFile("");
FileDistribution fileDistribution = fileDistributionProvider.getFileDistribution();
log.log(Level.INFO, "Distribute application package for " + applicationId + " (" + fileReference + ") to other config servers");
properties.configServerSpecs().stream()
.filter(spec -> ! spec.getHostName().equals(fileRegistry.fileSourceHost()))
.forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference)));
} | FileReference fileReference = fileRegistry.addFile(""); | void distributeApplicationPackage() {
if ( ! distributeApplicationPackage.value()) return;
FileRegistry fileRegistry = fileDistributionProvider.getFileRegistry();
FileReference fileReference = fileRegistry.addFile("");
FileDistribution fileDistribution = fileDistributionProvider.getFileDistribution();
log.log(Level.INFO, "Distribute application package for " + applicationId + " (" + fileReference + ") to other config servers");
properties.configServerSpecs().stream()
.filter(spec -> ! spec.getHostName().equals(fileRegistry.fileSourceHost()))
.forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference)));
} | class Preparation {
final SessionContext context;
final DeployLogger logger;
final PrepareParams params;
final Optional<ApplicationSet> currentActiveApplicationSet;
final Path tenantPath;
final ApplicationId applicationId;
/** The repository part of docker image to be used for this deployment */
final Optional<DockerImage> dockerImageRepository;
/** The version of Vespa the application to be prepared specifies for its nodes */
final Version vespaVersion;
final ContainerEndpointsCache containerEndpoints;
final Set<ContainerEndpoint> endpointsSet;
final ModelContext.Properties properties;
private final EndpointCertificateMetadataStore endpointCertificateMetadataStore;
private final EndpointCertificateRetriever endpointCertificateRetriever;
private final Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets;
private final Optional<AthenzDomain> athenzDomain;
private ApplicationPackage applicationPackage;
private List<PreparedModelsBuilder.PreparedModelResult> modelResultList;
private PrepareResult prepareResult;
private final PreparedModelsBuilder preparedModelsBuilder;
private final FileDistributionProvider fileDistributionProvider;
Preparation(SessionContext context, DeployLogger logger, PrepareParams params,
Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) {
this.context = context;
this.logger = logger;
this.params = params;
this.currentActiveApplicationSet = currentActiveApplicationSet;
this.tenantPath = tenantPath;
this.applicationId = params.getApplicationId();
this.dockerImageRepository = params.dockerImageRepository();
this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion);
this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.endpointCertificateMetadataStore = new EndpointCertificateMetadataStore(curator, tenantPath);
this.endpointCertificateRetriever = new EndpointCertificateRetriever(secretStore);
this.endpointCertificateMetadata = params.endpointCertificateMetadata()
.or(() -> params.tlsSecretsKeyName().map(EndpointCertificateMetadataSerializer::fromString));
endpointCertificateSecrets = endpointCertificateMetadata
.or(() -> endpointCertificateMetadataStore.readEndpointCertificateMetadata(applicationId))
.flatMap(endpointCertificateRetriever::readEndpointCertificateSecrets);
this.endpointsSet = getEndpoints(params.containerEndpoints());
this.athenzDomain = params.athenzDomain();
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
ConfigServerSpec.fromConfig(configserverConfig),
HostName.from(configserverConfig.loadBalancerAddress()),
configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null,
configserverConfig.athenzDnsSuffix(),
configserverConfig.hostedVespa(),
zone,
endpointsSet,
params.isBootstrap(),
currentActiveApplicationSet.isEmpty(),
context.getFlagSource(),
endpointCertificateSecrets,
athenzDomain);
this.fileDistributionProvider = fileDistributionFactory.createProvider(context.getServerDBSessionDir());
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
fileDistributionProvider,
hostProvisionerProvider,
context,
logger,
params,
currentActiveApplicationSet,
properties,
configserverConfig);
}
void checkTimeout(String step) {
if (! params.getTimeoutBudget().hasTimeLeft()) {
String used = params.getTimeoutBudget().timesUsed();
throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId);
}
}
void preprocess() {
try {
this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger);
} catch (IOException | TransformerException | ParserConfigurationException | SAXException e) {
throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e);
}
checkTimeout("preprocess");
}
AllocatedHosts buildModels(Instant now) {
SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>();
this.modelResultList = preparedModelsBuilder.buildModels(applicationId, dockerImageRepository, vespaVersion,
applicationPackage, allocatedHosts, now);
checkTimeout("build models");
return allocatedHosts.get();
}
void makeResult(AllocatedHosts allocatedHosts) {
this.prepareResult = new PrepareResult(allocatedHosts, modelResultList);
checkTimeout("making result from models");
}
void writeStateZK() {
log.log(Level.FINE, "Writing application package state to zookeeper");
writeStateToZooKeeper(context.getSessionZooKeeperClient(),
applicationPackage,
applicationId,
dockerImageRepository,
vespaVersion,
logger,
prepareResult.getFileRegistries(),
prepareResult.allocatedHosts(),
athenzDomain);
checkTimeout("write state to zookeeper");
}
void writeEndpointCertificateMetadataZK() {
endpointCertificateMetadata.ifPresent(metadata ->
endpointCertificateMetadataStore.writeEndpointCertificateMetadata(applicationId, metadata));
checkTimeout("write endpoint certificate metadata to zookeeper");
}
void writeContainerEndpointsZK() {
if (!params.containerEndpoints().isEmpty()) {
containerEndpoints.write(applicationId, params.containerEndpoints());
}
checkTimeout("write container endpoints to zookeeper");
}
void distribute() {
prepareResult.asList().forEach(modelResult -> modelResult.model
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution()));
checkTimeout("distribute files");
}
ConfigChangeActions result() {
return prepareResult.getConfigChangeActions();
}
private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) {
if (endpoints == null || endpoints.isEmpty()) {
endpoints = this.containerEndpoints.read(applicationId);
}
return ImmutableSet.copyOf(endpoints);
}
} | class Preparation {
final SessionContext context;
final DeployLogger logger;
final PrepareParams params;
final Optional<ApplicationSet> currentActiveApplicationSet;
final Path tenantPath;
final ApplicationId applicationId;
/** The repository part of docker image to be used for this deployment */
final Optional<DockerImage> dockerImageRepository;
/** The version of Vespa the application to be prepared specifies for its nodes */
final Version vespaVersion;
final ContainerEndpointsCache containerEndpoints;
final Set<ContainerEndpoint> endpointsSet;
final ModelContext.Properties properties;
private final EndpointCertificateMetadataStore endpointCertificateMetadataStore;
private final EndpointCertificateRetriever endpointCertificateRetriever;
private final Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets;
private final Optional<AthenzDomain> athenzDomain;
private ApplicationPackage applicationPackage;
private List<PreparedModelsBuilder.PreparedModelResult> modelResultList;
private PrepareResult prepareResult;
private final PreparedModelsBuilder preparedModelsBuilder;
private final FileDistributionProvider fileDistributionProvider;
Preparation(SessionContext context, DeployLogger logger, PrepareParams params,
Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) {
this.context = context;
this.logger = logger;
this.params = params;
this.currentActiveApplicationSet = currentActiveApplicationSet;
this.tenantPath = tenantPath;
this.applicationId = params.getApplicationId();
this.dockerImageRepository = params.dockerImageRepository();
this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion);
this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.endpointCertificateMetadataStore = new EndpointCertificateMetadataStore(curator, tenantPath);
this.endpointCertificateRetriever = new EndpointCertificateRetriever(secretStore);
this.endpointCertificateMetadata = params.endpointCertificateMetadata()
.or(() -> params.tlsSecretsKeyName().map(EndpointCertificateMetadataSerializer::fromString));
endpointCertificateSecrets = endpointCertificateMetadata
.or(() -> endpointCertificateMetadataStore.readEndpointCertificateMetadata(applicationId))
.flatMap(endpointCertificateRetriever::readEndpointCertificateSecrets);
this.endpointsSet = getEndpoints(params.containerEndpoints());
this.athenzDomain = params.athenzDomain();
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
ConfigServerSpec.fromConfig(configserverConfig),
HostName.from(configserverConfig.loadBalancerAddress()),
configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null,
configserverConfig.athenzDnsSuffix(),
configserverConfig.hostedVespa(),
zone,
endpointsSet,
params.isBootstrap(),
currentActiveApplicationSet.isEmpty(),
context.getFlagSource(),
endpointCertificateSecrets,
athenzDomain);
this.fileDistributionProvider = fileDistributionFactory.createProvider(context.getServerDBSessionDir());
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
fileDistributionProvider,
hostProvisionerProvider,
context,
logger,
params,
currentActiveApplicationSet,
properties,
configserverConfig);
}
void checkTimeout(String step) {
if (! params.getTimeoutBudget().hasTimeLeft()) {
String used = params.getTimeoutBudget().timesUsed();
throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId);
}
}
void preprocess() {
try {
this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger);
} catch (IOException | TransformerException | ParserConfigurationException | SAXException e) {
throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e);
}
checkTimeout("preprocess");
}
AllocatedHosts buildModels(Instant now) {
SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>();
this.modelResultList = preparedModelsBuilder.buildModels(applicationId, dockerImageRepository, vespaVersion,
applicationPackage, allocatedHosts, now);
checkTimeout("build models");
return allocatedHosts.get();
}
void makeResult(AllocatedHosts allocatedHosts) {
this.prepareResult = new PrepareResult(allocatedHosts, modelResultList);
checkTimeout("making result from models");
}
void writeStateZK() {
log.log(Level.FINE, "Writing application package state to zookeeper");
writeStateToZooKeeper(context.getSessionZooKeeperClient(),
applicationPackage,
applicationId,
dockerImageRepository,
vespaVersion,
logger,
prepareResult.getFileRegistries(),
prepareResult.allocatedHosts(),
athenzDomain);
checkTimeout("write state to zookeeper");
}
void writeEndpointCertificateMetadataZK() {
endpointCertificateMetadata.ifPresent(metadata ->
endpointCertificateMetadataStore.writeEndpointCertificateMetadata(applicationId, metadata));
checkTimeout("write endpoint certificate metadata to zookeeper");
}
void writeContainerEndpointsZK() {
if (!params.containerEndpoints().isEmpty()) {
containerEndpoints.write(applicationId, params.containerEndpoints());
}
checkTimeout("write container endpoints to zookeeper");
}
void distribute() {
prepareResult.asList().forEach(modelResult -> modelResult.model
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution()));
checkTimeout("distribute files");
}
ConfigChangeActions result() {
return prepareResult.getConfigChangeActions();
}
private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) {
if (endpoints == null || endpoints.isEmpty()) {
endpoints = this.containerEndpoints.read(applicationId);
}
return ImmutableSet.copyOf(endpoints);
}
} |
These paths should also be removed from `com.yahoo.vespa.hosted.controller.api.role.PathGroup` | private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); | private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "tenant");
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
/** Lists all applications of the given tenant, optionally filtered to one application name, each with its instances. */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // An empty filter means "include all applications".
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            // Optionally hide non-production instances from the listing.
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    // Dev packages exist only for zones which are deployed to manually.
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, controller.applications().applicationStore().getDev(id, zone));
}
/**
 * Returns the stored application package of the requested build — or of the latest
 * submitted build when no "build" query parameter is given — as a zip download.
 *
 * @throws IllegalArgumentException when the "build" parameter is not a number
 * @throws NotExistsException when no build has been submitted, or the package is gone
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) {
        // No explicit build requested: fall back to the latest submitted version.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    // Only name the download once we know the package exists.
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    return new ZipResponse(filename, applicationPackage.get());
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    // Serializes one application; getApplication throws NotExistsException when absent.
    var slime = new Slime();
    toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
private HttpResponse compileVersion(String tenantName, String applicationName) {
    // Reports the Vespa version this application should compile against.
    var id = TenantAndApplicationId.from(tenantName, applicationName);
    var slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    // Serializes one instance, together with the deployment status of its application.
    var slime = new Slime();
    // Resolve the instance before the application, so a missing instance is reported first.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    toSlime(slime.setObject(), instance,
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    // Registers the posted PEM public key as a developer key for the requesting user
    // (cloud tenants only), and returns the resulting key set.
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey key = KeyUtils.fromPemEncodedPublicKey(pemKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(key, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the posted PEM public key from the tenant's developer keys (cloud tenants
 * only), and returns the remaining key set.
 *
 * Note: a dead lookup of the key's owning Principal (computed but never used) was removed.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    // One { key, user } object per developer key, in map iteration order.
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    // Adds the posted PEM public key to the application's deploy keys and returns the full set.
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        application.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    // Removes the posted PEM public key from the application's deploy keys and returns the remaining set.
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        application.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Applies a partial update to the application: "majorVersion" (0 clears the pin) and/or
 * "pemDeployKey" (added to the existing set). Returns a summary of what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel meaning "unset the pinned major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    // Fetches the application, or fails with a (404-mapped) NotExistsException.
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    // Fetches the instance, or fails with a (404-mapped) NotExistsException.
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, with state, version and cluster details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // "fastDisk" duplicates information in the resources object — presumably kept for older clients; verify before removing.
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Lists the clusters of the given deployment with min/max/current resources, plus target and suggested when present. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Target/suggested resources are only present when the autoscaler has produced them.
        cluster.target().ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire name; explicit (rather than name()) so new states must be handled deliberately. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: return "failed";
        case parked: return "parked";
        case dirty: return "dirty";
        case ready: return "ready";
        case active: return "active";
        case inactive: return "inactive";
        case reserved: return "reserved";
        case provisioned: return "provisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration (service) state to its wire name. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case unorchestrated: return "unorchestrated";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps a node cluster type to its wire name. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: return "admin";
        case content: return "content";
        case container: return "container";
        case combined: return "combined";
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its wire name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast : return "fast";
        case slow : return "slow";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its wire name. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote : return "remote";
        case local : return "local";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    // Streams Vespa logs for the given deployment straight through to the client.
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               ZoneId.from(environment, region));
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            logStream.transferTo(outputStream);
        }
    };
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    // Forces the given job to run — or, with "reTrigger", re-runs its last execution.
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    } else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
    // Pauses automatic triggering of this job for the maximum allowed duration.
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private HttpResponse resume(ApplicationId id, JobType type) {
    // Lifts any pause on automatic triggering of this job.
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/** Serializes the given application, its first instance's change status, its instances, keys, metrics and activity. */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change info is taken from the first instance only — presumably all instances share it; verify before relying on it.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance: change/blocker info (when it is in the deployment spec),
 * global endpoints, and its deployments.
 *
 * Note: a dead computation of sorted job statuses (assigned but never read) was removed.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Order deployments by the spec when this instance is in it; otherwise keep the stored order.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        // Rotation status only applies to production deployments with assigned rotations.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    // Lists this instance's non-legacy rotation endpoint URLs — de-duplicated in
    // encounter order — and the id of its first assigned rotation, if any.
    var globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .distinct()
              .forEach(globalRotationsArray::addString);
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the given instance in full: identifiers, latest submitted version,
 * change/blocker info (when it is in the deployment spec), global endpoints,
 * deployments (plus declared-but-missing production zones), keys, metrics and activity.
 *
 * Note: a dead computation of sorted job statuses (assigned but never read) was removed.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Order deployments by the spec when this instance is in it; otherwise keep the stored order.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list production zones this instance should deploy to but has not yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" kept alongside the list — presumably for older clients; verify before removing.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    // Serializes a single deployment; 404 when the instance or the deployment is missing.
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region));
    Deployment deployment = Optional.ofNullable(instance.deployments().get(deploymentId.zoneId()))
                                    .orElseThrow(() -> new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()));
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
    // "version" for platform upgrades; "revision" for (known) application changes.
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application()
          .filter(revision -> ! revision.isUnknown())
          .ifPresent(revision -> toSlime(revision, object.setObject("revision")));
}
/** Serializes one endpoint, labeled with the given cluster name (empty for application-level endpoints). */
private void toSlime(Endpoint endpoint, String cluster, Cursor object) {
    object.setString("cluster", cluster);
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/** Serializes a single deployment: identifiers, endpoints, versions, job status, activity and metrics. */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Zone-level endpoints first, then non-legacy global endpoints targeting this zone (with empty cluster label).
    var endpointArray = response.setArray("endpoints");
    for (var endpoint : controller.routing().endpointsOf(deploymentId)) {
        toSlime(endpoint, endpoint.name(), endpointArray.addObject());
    }
    var globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                    .not().legacy()
                                    .targets(deploymentId.zoneId());
    for (var endpoint : globalEndpoints) {
        toSlime(endpoint, "", endpointArray.addObject());
    }
    response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry only applies in zones with a configured deployment TTL.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Derive a coarse job status ("complete"/"pending"/"running") for the job deploying to this zone, if any.
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes a known application version's build number, id hash, and source information onto the given object. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing useful to report for unknown versions

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Writes the git repository, branch, and commit of the given source revision, if one is present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP rotation status string for the given rotation state. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Writes one "endpointStatus" array entry per rotation assigned to the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    rotations.forEach(assigned -> {
        var entry = array.addObject();
        var targets = status.of(assigned.rotationId());
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    });
}
/** Returns the monitoring-system dashboard URI for the given deployment, as resolved by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.versionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Fall back to the newest applicable maven release this system does not already know.
                            // Collect the known version numbers ONCE up front; the previous code rebuilt this
                            // set inside the filter for every candidate, which was accidentally quadratic.
                            List<Version> knownVersions = versionStatus.versions().stream()
                                                                       .map(VespaVersion::versionNumber)
                                                                       .collect(Collectors.toList());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Sets a deployment in or out of service, for both rotation-backed and policy-backed global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment,
                                                           inService ? GlobalRouting.Status.in : GlobalRouting.Status.out,
                                                           agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A "reason" field is required in the request body for auditability.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent.name(),
                                                       controller.clock().instant().getEpochSecond());
    controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
/** Returns the in/out-of-service override status of every global rotation endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
controller.routing().globalRotationStatus(deploymentId)
.forEach((endpoint, status) -> {
// NOTE(review): each endpoint contributes TWO array entries — a plain string (the upstream id)
// followed by an object with status details. Looks intentional, but confirm consumers expect this shape.
array.addString(endpoint.upstreamIdOf(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.getStatus().name());
// Reason and agent may be null in the stored status; serialize those as empty strings.
statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
statusObject.setLong("timestamp", status.getEpoch());
});
return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given endpoint (or the instance's single rotation) for a deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    RotationId rotation = findRotationId(instance, endpointId); // resolved before the deployment check, as before
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given application: the current usage rate, this and last
 * month's aggregates, and a per-instance history of cpu/memory/disk snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();

    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));

    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());

    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());

    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());

    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");

    // Iterate the map and lists directly instead of the previous pointless
    // entrySet().stream().forEach(...) / stream().forEach(...) side-effect pipelines.
    history.forEach((applicationId, snapshots) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");

        for (ResourceSnapshot resourceSnapshot : snapshots) {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());

            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());

            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        }
    });

    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application version, and pin status) currently rolling out, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (out of orchestration). */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = ZoneId.from(environment, region); // constructed once instead of twice
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request to the config server; cluster-controller status pages come back as HTML. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));

    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/"))
        return new HtmlResponse(controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, restPath));

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Updates an existing tenant from the request body, after verifying that it exists. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than implicitly creating the tenant
    TenantName name = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(name, body),
                                accessControlRequests.credentials(name, body, request.getJDiscRequest()));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new tenant with the given name, using the specification and credentials in the request body. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, body),
                                accessControlRequests.credentials(name, body, request.getJDiscRequest()));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new application under the given tenant and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // created application itself is not needed here

    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance of the given application, first creating the application if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
// The request body is the bare version string; the empty version means "current system version".
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
// Refuse versions this system does not know about, listing the valid alternatives.
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin(); // pinning prevents automatic upgrades away from this version
controller.applications().deploymentTrigger().forceChange(id, change);
// The response message is built inside the lock callback and returned after the lock is released.
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Previously an unchecked Optional.get(): when no package was ever submitted this threw a bare
        // NoSuchElementException (a 500). Fail with a descriptive client error instead.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No known application package for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Change change = application.get().require(id.instance()).change();
if (change.isEmpty()) {
response.append("No deployment in progress for ").append(id).append(" at this time");
return;
}
// "choice" is matched case-insensitively against ChangesToCancel; invalid values throw IllegalArgumentException.
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
// Re-read the instance to report what change (if any) remains after the cancellation.
response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // An absent "hostname" property means all hosts of the deployment are restarted.
    controller.applications().restart(deploymentId,
                                      Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Deploys an application package directly to the zone of the given job type; restricted to manually deployed environments (or operators). */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Check and fetch by the same key: the check previously used the literal "applicationZip"
    // while the fetch used EnvironmentResource.APPLICATION_ZIP.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));

    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();

    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application package to the given zone, or redeploys an existing deployment.
 * Handles three shapes of request: the system proxy application, a deployment identified by an
 * explicit source revision + build number, and a direct redeploy of whatever is currently deployed.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
// Multipart request: the "deployOptions" JSON part is required; "applicationZip" is optional.
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
// Both "" and the literal string "null" are treated as "no version given".
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// To avoid second-guessing the upgrader we don't allow version to be set. It will always be the current
// system version, and deploys are refused entirely while the system itself is upgrading.
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// A source revision identifies a previously submitted package; it is mutually exclusive with an uploaded zip.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
// Fetch the stored package for the identified version from the controller's registry.
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
// Direct redeploy with nothing specified: reuse the exact versions of the existing deployment.
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Verify the Athenz identity configured in the package before handing it to the config server.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists; access-control credentials are taken from the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, including all of its instances. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the application itself when this was its last remaining instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Removes (deactivates) the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    controller.applications().deactivate(applicationId, zone);
    return new MessageResponse("Deactivated " + new DeploymentId(applicationId, zone));
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
// Tests run against the production deployments of the *default* instance of the application.
ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(defaultInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(defaultInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
var testedZone = type.zone(controller.system());
// For non-production jobs the tested zone is not among the production deployments, so add it explicitly
// (for the instance under test, not the default instance).
if ( ! type.isProduction())
deployments.add(new DeploymentId(id, testedZone));
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
controller.routing().zoneEndpointsOf(deployments),
controller.applications().contentClustersByZone(deployments)));
}
/** Parses a source revision from a JSON object, requiring the "repository", "branch", and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, throwing a NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));

    return tenant.get();
}
/**
 * Serializes a full tenant view: type-specific metadata (Athenz or cloud) followed by its applications,
 * optionally recursing into instance details and filtering to production instances, per request parameters.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info is only present when it has been synced from the property database.
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
// Each entry is itself an array of person names (a contact level in the escalation chain).
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (com.yahoo.vespa.hosted.controller.Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
// "production" request parameter limits the listing; "recursive" expands each instance fully.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
/** Serializes node count, group count, per-node resources, and — in non-public systems — an estimated cost. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    if (controller.zoneRegistry().system().isPublic()) return; // cost is not exposed in public systems

    // Rounded to two decimals; same formula as before.
    double cost = Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / 3.0) / 100.0;
    object.setDouble("cost", cost);
}
/** Serializes a node resource specification; the field order here is part of the API response format. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes the compact form of a tenant used in tenant listings: name, type-specific metadata, and its API url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
// Cloud tenants carry no extra list metadata beyond the type.
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
// All components are taken from an already-valid URI, so this constructor cannot fail.
throw new RuntimeException("Will not happen", e);
}
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause; it was previously dropped, hiding the parse failure's origin.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a summary of the given job run: number, target platform, target revision (when known), reason, and timestamp. */
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
// NOTE(review): the reason is hardcoded — the actual trigger reason is not available here. Confirm before relying on this field.
object.setString("reason", "unknown reason");
// The end time when the run has finished, otherwise its start time.
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses its content as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException with no message or cause, hiding the actual failure.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user principal of the request, or throws when the request carries none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null)
        throw new InternalServerErrorException("Expected a user principal");

    return principal;
}
/** Returns the named field of the given object, failing if it is not present. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field of the given object as a string, if it is present. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/'. */
private static String path(Object... elements) {
    Joiner slashJoiner = Joiner.on("/");
    return slashJoiner.join(elements);
}
/** Renders tenant and application names, plus the canonical URL of the application resource. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String applicationPath = "/application/v4" +
                             "/tenant/" + id.tenant().value() +
                             "/application/" + id.application().value();
    object.setString("url", withPath(applicationPath, request.getUri()).toString());
}
/** Renders tenant, application and instance names, plus the canonical URL of the instance resource. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String instancePath = "/application/v4" +
                          "/tenant/" + id.tenant().value() +
                          "/application/" + id.application().value() +
                          "/instance/" + id.instance().value();
    object.setString("url", withPath(instancePath, request.getUri()).toString());
}
/**
 * Renders the result of preparing/activating a deployment: revision id, package size,
 * prepare log messages, and the config change actions (restarts and re-feeds) required.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    // The log may be absent entirely; an empty array is still emitted in that case.
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Config change actions: services which must be restarted ...
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // ... and document types which must be re-fed.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Appends one object per service info to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", info.serviceName);
        entry.setString("serviceType", info.serviceType);
        entry.setString("configId", info.configId);
        entry.setString("hostName", info.hostName);
    });
}
/** Appends each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream into a single string, or returns null when the stream is empty.
 * Uses the platform default charset, as before.
 */
private String readToString(InputStream stream) {
    // Fix: close the scanner (and thereby the fully-consumed stream) instead of leaking it.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) { // "\A": the whole input as one token
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the system's version status currently contains the given version. */
private boolean systemHasVersion(Version version) {
    for (var candidate : controller.versionStatus().versions())
        if (candidate.versionNumber().equals(version))
            return true;
    return false;
}
/** Returns whether the request asks for recursion over tenants (or anything deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion over applications (or anything deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion all the way down to deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the request asks to list only production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS"; // historical spelling, kept for API compatibility
        case cloud: return "CLOUD";
        // Fix: report the type we actually switched on, not the class name, consistent with the
        // sibling switch in toSlime(…, Tenant, …).
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the {tenant}/{application}/{instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Extracts the job type from the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Extracts a run id — application, job type and run number — from the request path. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of a new application revision: an application package and a test package,
 * with build metadata supplied as a JSON part of the multipart request.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    // An absent project id parses as 0 and is normalized to 1.
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is recorded only when repository, branch and commit are all present.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // The source URL, when given, must be absolute.
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    // Verify the package's identity configuration before accepting the submission.
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        commit,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Parses the multipart request into its named parts. When an x-Content-Hash header is present,
 * the body is digested while parsing, and the digest must match the base64-decoded header value.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("x-Content-Hash"); // header names are case-insensitive
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Wrap the body stream in a SHA-256 digester so the hash is computed while parsing.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation of the instance matching the given endpoint id, or the instance's
 * single rotation when no endpoint id is given.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    var rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isEmpty()) {
        // Without an endpoint id, the choice must be unambiguous.
        if (rotations.size() > 1)
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        return rotations.get(0).rotationId();
    }
    return rotations.stream()
                    .filter(rotation -> rotation.endpointId().id().equals(endpointId.get()))
                    .map(AssignedRotation::rotationId)
                    .findFirst()
                    .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                              " does not exist for " + instance));
}
/** Maps a rotation state to its API string; unrecognized states map to "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Maps an endpoint scope to its API string. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Maps a routing method to its API string. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/** Returns the named request context attribute cast to the given type, failing when absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    // Covers both a missing attribute (null) and one of an unexpected type.
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Route prefix that clients may optionally prepend to all /application/v4 paths; stripped in handle().
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;                       // central access point for tenants, applications and jobs
private final AccessControlRequests accessControlRequests; // parses access-control payloads from requests
private final TestConfigSerializer testConfigSerializer;   // constructed with controller.system(); used for test-config responses
/** Creates this handler; dependencies are injected by the container. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx);
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
    // Operations handled here (e.g. deployments) can be long-running; allow a generous timeout.
    Duration requestTimeout = Duration.ofMinutes(20);
    return requestTimeout;
}
/**
 * Entry point: dispatches on HTTP method, and translates exceptions thrown by the handlers
 * into the corresponding HTTP error responses. Catch clause order is significant: the most
 * specific exception types must come before the RuntimeException catch-all.
 */
@Override
public HttpResponse handle(HttpRequest request) {
    try {
        // The optional "/api" prefix is stripped before route matching.
        Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes onto our own status codes; anything unrecognized is a 400.
        switch (e.getErrorCode()) {
            case NOT_FOUND:
                return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Unexpected: log with stack trace, return only the message to the client.
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Routes PUT requests. Both segment orders (…/instance/…/environment/…/region/… and
 * …/environment/…/region/…/instance/…) are accepted for the rotation override route.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    // NOTE(review): the boolean here is 'false' while the DELETE routes pass 'true' — presumably an
    // in-service flag; confirm against setGlobalRotationOverride.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests: tenant/application/instance creation, key management, submission,
 * deployment triggering and deploys. Application-level routes without an instance segment
 * operate on the "default" instance; both segment orders are accepted for deploy/restart.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests. The instance-level route patches the owning application, so both routes share a handler. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests: removal of tenants, applications, instances, keys and deployments,
 * cancellation of pending changes and jobs. Application-level "deploying" routes operate on
 * the "default" instance; both segment orders are accepted for deactivate/override.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // NOTE(review): 'true' here vs 'false' in the PUT routes — presumably an in-service flag; confirm against setGlobalRotationOverride.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with an empty body and the supported methods in the Allow header. */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Renders all tenants, fully expanded, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Handles the API root: a recursive tenant listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Renders the short-form tenant list. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    controller.tenants().asList().forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or a 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Lists applications under the given tenant — optionally filtered to a single application name —
 * each with its instances (only production instances when ?production=true).
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // An absent filter matches everything.
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the last dev application package deployed to the given (manually deployed) zone. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    return new ZipResponse(filename, content);
}
/**
 * Serves the stored application package for the given application: the build given by
 * ?build=N, or by default the latest submitted build.
 *
 * @throws NotExistsException when nothing has been submitted, or the requested build is not stored
 * @throws IllegalArgumentException when ?build is not a valid number
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    // Fix: removed the unused 'applicationId' local (ApplicationId.from(...)) — it was never read.
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) {
        // No build given: fall back to the latest submitted version.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders the given application as a JSON object. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Renders the Vespa version applications under this tenant/application should compile against. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    var version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName));
    root.setString("compileVersion", version.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders the given instance, together with the deployment status of its application. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    // getInstance is resolved before getApplication, preserving which "not found" error wins.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    toSlime(slime.setObject(), instance,
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the requesting user's developer key (PEM, in the "key" field of the JSON body)
 * on the given cloud tenant, and returns all developer keys of the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Mutate the tenant under lock; the response is rendered from the updated tenant inside the lock.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes a developer key (PEM, in the "key" field of the JSON body) from the given cloud
 * tenant, and returns the remaining developer keys.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Fix: removed the unused 'Principal user' local — it was looked up but never read.
    Slime root = new Slime();
    // Mutate the tenant under lock; the response is rendered from the updated tenant inside the lock.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Renders each key/principal pair into the given array, with the key in PEM form. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds a deploy key (PEM, in the "key" field of the JSON body) to the given application,
 * and returns all deploy keys of the application.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Mutate the application under lock; the response is rendered from the updated application.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes a deploy key (PEM, in the "key" field of the JSON body) from the given application,
 * and returns the remaining deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Mutate the application under lock; the response is rendered from the updated application.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Applies a partial update to an application. Supported fields: "majorVersion"
 * (0 clears the pinned major version) and "pemDeployKey" (adds a deploy key).
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 means "unpin": stored as null.
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Returns the given application, failing with a 404 when it does not exist. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/** Returns the given instance, failing with a 404 when it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    return instance.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/**
 * Lists the nodes allocated to the given deployment, as fetched from the zone's node repository,
 * serialized under a top-level "nodes" array.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        // "reservedTo" is only present for nodes reserved to a tenant
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // "fastDisk" duplicates part of the resources — presumably kept for older clients; verify before removing
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the cluster resource configuration of the given deployment — min, max and current
 * resources per cluster, plus target/suggested resources when present — under a "clusters" object
 * keyed by cluster id.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // target/suggested are optional — presumably produced by autoscaling; verify against Cluster
        cluster.target().ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes a node state to its wire-format string; throws on values not yet mapped here. */
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Serializes a node orchestration state to its wire-format string; throws on values not yet mapped here. */
private static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Serializes a node cluster type to its wire-format string; throws on values not yet mapped here. */
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
/** Serializes a disk speed to its wire-format string; throws on values not yet mapped here. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
/** Serializes a storage type to its wire-format string; throws on values not yet mapped here. */
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
/** Streams Vespa log entries for the given deployment directly to the client, filtered by the query parameters. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               ZoneId.from(environment, region));
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Pipe the config server's log stream straight through without buffering it in memory.
            logStream.transferTo(outputStream);
        }
    };
}
/**
 * Triggers the given job for the given instance. With "reTrigger" set in the request body the job
 * is re-triggered as-is; otherwise it is force-triggered (optionally skipping tests via "skipTests").
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector body = toSlime(request.getData()).get();
    boolean requireTests = ! body.field("skipTests").asBool();
    String triggered;
    if (body.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    }
    else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses automatic triggering of the given job type for the maximum allowed duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes automatic triggering of the given job type for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application overview: identity, deployments link, versions, change status,
 * instances, deploy keys, metrics and activity. Field emission order determines JSON key order.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Deploying/outstanding change is reported from the first instance only — presumably because
    // change is tracked application-wide through its first instance; verify against Application model.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    // Include all instances, or only production instances when the request asks for that.
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance as a member of an application's "instances" array.
 * Change, outstanding change and change blockers are only emitted when the deployment spec
 * declares this instance. Fixed: a sorted-job-status list was computed here and never used;
 * the dead computation is removed.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Sort deployments by declared deployment order when the spec declares this instance.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only include the location and a link to the deployment's full data
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/**
 * Emits the instance's non-legacy, rotation-backed global endpoint URLs as "globalRotations",
 * and the id of its first assigned rotation, if any, as "rotationId".
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor rotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct()                       // de-duplicate, keeping first-seen order
              .forEach(rotationsArray::addString);
    instance.rotations().stream()
            .findFirst()
            .map(AssignedRotation::rotationId)
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the root-level view of an instance: identity, source, change status, endpoints,
 * deployments (actual and planned), deploy keys, metrics and activity.
 * Fixed: a sorted-job-status list was computed here and never used; the dead computation is removed.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change and change-blocker information is only emitted when the spec declares this instance.
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Sort deployments by declared deployment order when the spec declares this instance.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only include the location and a link to the deployment's full data
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list planned production deployment zones which do not yet have a deployment.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" alongside the "pemDeployKeys" array — presumably kept for older clients; verify.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full serialized view of a single deployment, or throws NotExistsException. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version, and its application revision unless unknown. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    var revision = change.application().filter(version -> ! version.isUnknown());
    revision.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
/** Serializes one endpoint, labeled with the given cluster name (empty for application-scoped endpoints). */
private void toSlime(Endpoint endpoint, String cluster, Cursor object) {
    object.setString("cluster", cluster);
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes the full view of a deployment: identity, endpoints, versions, job status,
 * rotation status, activity and metrics. Field emission order determines JSON key order.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Zone-scoped endpoints first, then non-legacy global endpoints targeting this zone, in one array.
    var endpointArray = response.setArray("endpoints");
    for (var endpoint : controller.routing().endpointsOf(deploymentId)) {
        toSlime(endpoint, endpoint.name(), endpointArray.addObject());
    }
    var globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                    .not().legacy()
                                    .targets(deploymentId.zoneId());
    for (var endpoint : globalEndpoints) {
        // Global endpoints carry an empty cluster label
        toSlime(endpoint, "", endpointArray.addObject());
    }
    response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only set in zones with a deployment time-to-live (e.g. non-production).
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Report job status for the deployment job of this zone: complete, pending or running.
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version: build number, hash, source revision, source URL and commit. No-op when unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the git repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes a rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the status of each assigned rotation for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        var targets = status.of(assigned.rotationId());
        Cursor entry = array.addObject();
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system (dashboard) URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.versionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Fall back to versions from the maven repository, excluding those already
                            // known to the version status. The known set is computed once here instead
                            // of being rebuilt for every candidate version, as it previously was.
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Sets a deployment of the given instance in or out of service for global routing, via both mechanisms. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    if (instance.deployments().get(zone) == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Update both rotation-backed and routing-policy-backed global endpoints.
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    // Operators and tenants are tracked as distinct agents of the change
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A reason is mandatory for rotation status changes
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/**
 * Returns the global rotation status overrides for the given deployment.
 *
 * NOTE(review): the "globalrotationoverride" array interleaves each endpoint's upstream id (a string)
 * with its status (an object) — this flat shape looks accidental, but is preserved for compatibility.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // reason and agent may be null; serialize as empty strings
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP status of the given (or only) rotation of the instance in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes metering data for the given tenant and application: the current resource rate,
 * this and last month's totals, and per-instance time series of cpu, mem and disk usage.
 * Cleaned up: side-effecting {@code .stream().forEach(...)} pipelines replaced with
 * {@code Map.forEach} and an enhanced for loop; identical output.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    // One {unixms, value} series per instance for each of cpu, mem and disk.
    history.forEach((instanceId, snapshots) -> {
        String instanceName = instanceId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        for (ResourceSnapshot snapshot : snapshots) {
            long timestamp = snapshot.getTimestamp().toEpochMilli();
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", timestamp);
            cpu.setDouble("value", snapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", timestamp);
            mem.setDouble("value", snapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", timestamp);
            disk.setDouble("value", snapshot.getDiskGb());
        }
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application version) currently rolling out for the given instance, as JSON. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        // Either or both of platform and application may be part of the ongoing change.
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", instance.change().isPinned());
    }
    // An empty object means no change is currently in progress.
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (orchestrated down), as {"suspended": bool}. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, proxied from the config servers of the deployment's zone. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a request for a single service of the given deployment to the config server.
 * Cluster controller status pages are returned as HTML; everything else as JSON.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    // Special case: the cluster controller serves human-readable status pages, not JSON.
    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, restPath);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/**
 * Updates an existing tenant from the request body and returns the updated tenant.
 *
 * @throws NotExistsException if the tenant does not exist (via getTenantOrThrow)
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // fail fast with 404 semantics before attempting the update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, subject to access control, and returns its id as JSON. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    com.yahoo.vespa.hosted.controller.Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, implicitly creating the enclosing application first if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request); // response of the implicit create is intentionally discarded
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version means "current system version". Optionally pins the change so it is not
 * superseded by later system upgrades.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        // Only versions known to this system may be targeted.
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no application version has ever been submitted,
 *         instead of an opaque NoSuchElementException from an unchecked Optional.get()
 */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "Cannot trigger deployment of " + id + ": No known application version")));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Cancel ongoing change for given application, e.g., everything with {"cancel":"all"}.
 * The choice string must map (case-insensitively) to a ChangesToCancel constant.
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Throws IllegalArgumentException for unknown choices, which surfaces as a client error.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment (via the optional 'hostname' query parameter). */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/**
 * Deploys the uploaded application package directly through the given job type.
 * Only manually deployed environments are allowed for non-operators.
 *
 * @throws IllegalArgumentException on missing form parts or disallowed environments
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same constant for the presence check as for the lookup below.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Vespa version is optional; absence means "use the default for the zone".
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application package (or a previously stored application version) to the given zone.
 * Handles three cases: the system proxy application, redeployment of an existing deployment,
 * and a normal deployment of an uploaded or referenced package.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // Versions are pinned to the system version for system applications; reject explicit versions.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse deployment while the system itself is mid-upgrade or before its version is known.
        if (controller.versionStatus().isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
    // A stored application version may be referenced by source revision + build number,
    // but never together with an uploaded package.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    // Bare direct deploy with no package/version given: redeploy what is already running in the zone.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, subject to access control, and returns its (now removed) representation. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    controller.tenants().delete(tenant.get().name(),
                                accessControlRequests.credentials(tenant.get().name(),
                                                                  toSlime(request.getData()).get(),
                                                                  request.getJDiscRequest()));
    return tenant(tenant.get(), request);
}
/** Deletes the given application (all its instances), subject to access control. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; if it was the last instance, the enclosing application is deleted as well. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        // Last instance gone: remove the application itself, which requires credentials.
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the given deployment from its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       ZoneId.from(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // Non-production jobs test a zone which is not among the production deployments; add it explicitly.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().contentClustersByZone(deployments)));
}
/**
 * Parses a source revision from the given JSON object.
 * All three of "repository", "branch" and "commit" are required.
 */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete =    object.field("repository").valid()
                       && object.field("branch").valid()
                       && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (mapped to 404). */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, including type-specific metadata and its applications, to the given cursor.
 * Athenz tenants include domain/property/contact data; cloud tenants include developer keys.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact information is optional and only serialized when present.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: full (recursive) serialization or just ids, depending on the 'recursive' parameter.
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
}
/** Serializes cluster resources; cost is omitted in public systems. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    if ( ! controller.zoneRegistry().system().isPublic())
        // Total cost rounded to two decimals; divisor 3.0 — presumably a margin/markup factor, TODO confirm.
        object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / 3.0) / 100.0);
}
/** Serializes the resources of a single node to the given cursor. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a short tenant summary (name, metadata, URL) for the tenants list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no extra metadata for cloud tenants in the list view
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        // Query and fragment are intentionally dropped.
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        // Components come from an already-valid URI, so this cannot occur in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long, returning the default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause so the original parse failure is not lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a job run summary: id, platform version, revision (when known), reason and timestamp. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // Trigger reason is not tracked here; a fixed placeholder is emitted.
    object.setString("reason", "unknown reason");
    // End time when the run is finished, otherwise start time.
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (up to 1 MB) and parses it as JSON into a Slime tree.
 *
 * @throws RuntimeException wrapping the underlying IOException on read failure
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause; the original threw a bare RuntimeException with no context.
        throw new RuntimeException(e);
    }
}
/** Returns the authenticated user principal of the request, or fails with a 500 if authentication did not run. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    return principal;
}
/** Returns the required field with the given key, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}
/** Returns the string value of the field with the given key, or empty if the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/**
 * Joins the string forms of the given elements with '/' into a path.
 * Uses the standard stream API (already in scope) instead of Guava's Joiner;
 * like Joiner, this throws NullPointerException on null elements.
 */
private static String path(Object... elements) {
    return Arrays.stream(elements).map(Object::toString).collect(joining("/"));
}
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the given version is among the versions currently active in this system. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
/** Returns whether the 'recursive' query parameter requests recursion down to (at least) tenant level. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' query parameter requests recursion down to (at least) application level. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' query parameter requests recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the 'production' query parameter restricts responses to production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/**
 * Returns the API string for the given tenant's type.
 *
 * @throws IllegalArgumentException for unknown tenant types; the message reports
 *         the type itself, consistent with the other tenant-type switches in this class
 */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the 'tenant', 'application' and 'instance' path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the 'jobtype' path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts the full run id (application, job type, run number) from the request path. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Submits an application package and test package for continuous deployment.
 * Options (project id, source revision, source URL, author email) are read from the
 * 'submitOptions' form part; the packages from their own multipart form parts.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // 0/absent is normalized to 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only formed when all three of repository, branch and commit are given.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        commit,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Parses the multipart form data of the request.
 * When an X-Content-Hash header is present, the body is digested while parsing and the
 * SHA-256 digest is verified against the (base64-encoded) header value.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    // NOTE(review): header looked up as "x-Content-Hash" but reported as "X-Content-Hash" —
    // presumably fine since HTTP header lookup is case-insensitive; confirm for this HttpRequest impl.
    String contentHash = request.getHeader("x-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Resolves the rotation to operate on for the given instance.
 * With an endpoint id, the matching rotation is returned; without one, the instance's single
 * rotation is returned, and multiple rotations without an endpoint id is an error.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the API string for a rotation state; states not explicitly handled map to "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    switch (state) {
        case in: return "IN";
        case out: return "OUT";
    }
    return "UNKNOWN";
}
/** Returns the API string for an endpoint scope; unknown scopes are an error. */
private static String endpointScopeString(Endpoint.Scope scope) {
    switch (scope) {
        case global: return "global";
        case zone: return "zone";
    }
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the API string for a routing method; unknown methods are an error. */
private static String routingMethodString(RoutingMethod method) {
    switch (method) {
        case exclusive: return "exclusive";
        case shared: return "shared";
        case sharedLayer4: return "sharedLayer4";
    }
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/** Returns the request context attribute with the given name, cast to the given type, or throws if absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                   .filter(cls::isInstance)
                   .map(cls::cast)
                   .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    // Operator status is granted when any of the caller's roles is the hostedOperator definition.
    return securityContext.roles().stream()
                          .map(Role::definition)
                          .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
Could use `cachedIndex.computeIfAbsent` here instead of the explicit branch (though avoiding the lambda allocation might be faster?) | private int getCachedIndex(String nodeName) {
Integer index = cachedIndex.get(nodeName);
if (index == null) {
index = getIndex(nodeName);
cachedIndex.put(nodeName, index);
}
return index;
} | } | private int getCachedIndex(String nodeName) {
return cachedIndex.computeIfAbsent(nodeName, key -> getIndex(key));
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} |
But not by this instance? Wouldn’t this also destruktivt the next gen create by DI, or isn’t the registre shared? | protected void destroy() {
laterExecutor.shutdown();
docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct());
} | docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct()); | protected void destroy() {
laterExecutor.shutdown();
docprocServiceRegistry.allComponents().forEach(docprocService -> docprocService.deconstruct());
} | class DocumentProcessingHandler extends AbstractRequestHandler {
// Logger shared by the handler and its inner classes (e.g. LaterTimerTask).
private static Logger log = Logger.getLogger(DocumentProcessingHandler.class.getName());
// One DocprocService per configured chain; also handed to request contexts in handleRequest.
private final ComponentRegistry<DocprocService> docprocServiceRegistry;
// Factories for concrete document types, forwarded to request contexts.
private final ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry;
// All document processor chains built from the chains model.
private final ChainRegistry<DocumentProcessor> chainRegistry = new ChainRegistry<>();
// Executor used to re-submit tasks that asked to be retried later (Progress.LATER); see submit(task, delay).
private final ScheduledThreadPoolExecutor laterExecutor =
new ScheduledThreadPoolExecutor(2, new DaemonThreadFactory("docproc-later-"));
// Document-related container config, forwarded to request contexts.
private ContainerDocumentConfig containerDocConfig;
// Manager for the configured document types, exposed via getDocumentTypeManager().
private final DocumentTypeManager documentTypeManager;
/**
 * Creates the handler and registers one DocprocService per chain in the given chains model.
 *
 * @param docprocServiceRegistry registry the per-chain services are registered into
 * @param documentProcessorComponentRegistry all available document processor components
 * @param docFactoryRegistry factories for concrete document types
 * @param numThreads max worker threads per service; non-positive means one per processor core
 * @param documentTypeManager manager for the configured document types
 * @param chainsModel chain configuration; may be null, in which case no services are set up
 * @param schemaMap schema (field name) mappings; stored in a static field shared by all instances
 * @param statistics statistics manager passed to each chain's call stack
 * @param metric metric receiver passed to each chain's call stack
 * @param containerDocConfig document-related container config, forwarded to request contexts
 */
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
int numThreads,
DocumentTypeManager documentTypeManager,
ChainsModel chainsModel, SchemaMap schemaMap, Statistics statistics,
Metric metric,
ContainerDocumentConfig containerDocConfig) {
this.docprocServiceRegistry = docprocServiceRegistry;
this.docFactoryRegistry = docFactoryRegistry;
this.containerDocConfig = containerDocConfig;
this.documentTypeManager = documentTypeManager;
// NOTE(review): assigns a global static field, so the schema map is shared across all
// handler instances — the most recently constructed handler wins. Confirm this is intended.
DocprocService.schemaMap = schemaMap;
// Periodic and delayed tasks still queued on the LATER executor are dropped at shutdown.
laterExecutor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
laterExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
// A null chains model means no chains are configured; no services are registered.
if (chainsModel != null) {
prepareChainRegistry(chainRegistry, chainsModel, documentProcessorComponentRegistry);
for (Chain<DocumentProcessor> chain : chainRegistry.allComponents()) {
log.config("Setting up call stack for chain " + chain.getId());
// Each chain becomes a DocprocService with its own call stack and worker thread count.
DocprocService service = new DocprocService(chain.getId(), convertToCallStack(chain, statistics, metric), documentTypeManager, computeNumThreads(numThreads));
service.setInService(true);
docprocServiceRegistry.register(service.getId(), service);
}
}
}
/** Returns maxThreads when positive; otherwise falls back to the number of available processors. */
private static int computeNumThreads(int maxThreads) {
    if (maxThreads > 0) return maxThreads;
    return Runtime.getRuntime().availableProcessors();
}
/**
 * Convenience constructor: unpacks a {@link DocumentProcessingHandlerParameters} bundle and
 * delegates to the main constructor.
 */
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
DocumentProcessingHandlerParameters params) {
this(docprocServiceRegistry, documentProcessorComponentRegistry, docFactoryRegistry,
params.getMaxNumThreads(),
params.getDocumentTypeManager(), params.getChainsModel(), params.getSchemaMap(),
params.getStatisticsManager(),
params.getMetric(),
params.getContainerDocConfig());
}
/**
 * Constructor used by the container (dependency injection). Builds the handler parameters
 * from the injected configs — document type manager, chains model and schema map — and starts
 * with an empty service registry.
 * NOTE(review): containerMbusConfig is accepted but never read here — presumably kept so the
 * config dependency is wired up for injection; confirm before removing.
 */
@Inject
public DocumentProcessingHandler(ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
ChainsConfig chainsConfig,
SchemamappingConfig mappingConfig,
DocumentmanagerConfig docManConfig,
DocprocConfig docprocConfig,
ContainerMbusConfig containerMbusConfig,
ContainerDocumentConfig containerDocConfig,
Statistics manager,
Metric metric) {
this(new ComponentRegistry<>(),
documentProcessorComponentRegistry, docFactoryRegistry, new DocumentProcessingHandlerParameters().setMaxNumThreads
(docprocConfig.numthreads())
.setDocumentTypeManager(new DocumentTypeManager(docManConfig))
.setChainsModel(buildFromConfig(chainsConfig)).setSchemaMap(configureMapping(mappingConfig))
.setStatisticsManager(manager)
.setMetric(metric)
.setContainerDocumentConfig(containerDocConfig));
}
/** Returns the registry of docproc services, one per configured chain. */
@Override
public ComponentRegistry<DocprocService> getDocprocServiceRegistry() {
return docprocServiceRegistry;
}
/** Returns the registry of document processor chains built from the chains model. */
public ChainRegistry<DocumentProcessor> getChains() {
return chainRegistry;
}
/** Creates a SchemaMap configured from the given schema-mapping config. */
private static SchemaMap configureMapping(SchemamappingConfig mappingConfig) {
    SchemaMap schemaMap = new SchemaMap();
    schemaMap.configure(mappingConfig);
    return schemaMap;
}
/**
 * Builds a CallStack holding the processors of the given chain, in chain order.
 * Side effect: each processor's field map is extended with the schema mappings registered
 * for this chain/processor pair in the (static) DocprocService.schemaMap.
 */
private static CallStack convertToCallStack(Chain<DocumentProcessor> chain, Statistics statistics, Metric metric) {
    String chainName = chain.getId().stringValue();
    CallStack callStack = new CallStack(chainName, statistics, metric);
    for (DocumentProcessor processor : chain.components()) {
        processor.getFieldMap().putAll(DocprocService.schemaMap.chainMap(chainName, processor.getId().stringValue()));
        callStack.addLast(processor);
    }
    return callStack;
}
/**
 * Routes an incoming message bus request to the docproc service named by the request.
 * Always returns null — every outcome (skip, failure, or asynchronous processing) is
 * delivered through the request context rather than a content channel.
 */
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
RequestContext requestContext;
// Only requests arriving over the message bus are supported by this handler.
if (request instanceof MbusRequest) {
requestContext = new MbusRequestContext((MbusRequest) request, handler, docprocServiceRegistry, docFactoryRegistry, containerDocConfig);
} else {
throw new IllegalArgumentException("Request type not supported: " + request);
}
// Requests the context deems unprocessable are skipped immediately.
if (!requestContext.isProcessable()) {
requestContext.skip();
return null;
}
String serviceName = requestContext.getServiceName();
DocprocService service = docprocServiceRegistry.getComponent(serviceName);
if (service == null) {
// Unknown service: fail the request back through the context.
log.log(Level.SEVERE, "DocprocService for session '" + serviceName +
"' not found, returning request '" + requestContext + "'.");
requestContext.processingFailed(RequestContext.ErrorCode.ERROR_PROCESSING_FAILURE,
"DocprocService " + serviceName + " not found.");
return null;
} else if (service.getExecutor().getCallStack().size() == 0) {
// A chain with no processors is a no-op; skip the request.
requestContext.skip();
return null;
}
// Processing happens asynchronously on the service's own thread pool.
DocumentProcessingTask task = new DocumentProcessingTask(requestContext, this, service, service.getThreadPoolExecutor());
task.submit();
return null;
}
/**
 * Re-submits the given task after {@code delay} milliseconds, used when processing
 * asked to be retried later (Progress.LATER).
 */
void submit(DocumentProcessingTask task, long delay) {
    laterExecutor.schedule(new LaterTimerTask(task, delay), delay, TimeUnit.MILLISECONDS);
}
/**
 * Timer task that re-submits a document processing task after it asked to be
 * retried later (Progress.LATER).
 */
private class LaterTimerTask extends TimerTask {

    private final DocumentProcessingTask processingTask;
    private final long delay; // kept only for the log messages

    private LaterTimerTask(DocumentProcessingTask processingTask, long delay) {
        this.delay = delay;
        // Supplier overload: the message is only built when FINE logging is actually enabled.
        log.log(Level.FINE, () -> "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask);
        this.processingTask = processingTask;
    }

    @Override
    public void run() {
        log.log(Level.FINE, () -> "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask);
        processingTask.submit();
    }
}
/** Returns the document type manager this handler was created with. */
public DocumentTypeManager getDocumentTypeManager() {
return documentTypeManager;
}
} | class DocumentProcessingHandler extends AbstractRequestHandler {
private static Logger log = Logger.getLogger(DocumentProcessingHandler.class.getName());
private final ComponentRegistry<DocprocService> docprocServiceRegistry;
private final ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry;
private final ChainRegistry<DocumentProcessor> chainRegistry = new ChainRegistry<>();
private final ScheduledThreadPoolExecutor laterExecutor =
new ScheduledThreadPoolExecutor(2, new DaemonThreadFactory("docproc-later-"));
private ContainerDocumentConfig containerDocConfig;
private final DocumentTypeManager documentTypeManager;
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
int numThreads,
DocumentTypeManager documentTypeManager,
ChainsModel chainsModel, SchemaMap schemaMap, Statistics statistics,
Metric metric,
ContainerDocumentConfig containerDocConfig) {
this.docprocServiceRegistry = docprocServiceRegistry;
this.docFactoryRegistry = docFactoryRegistry;
this.containerDocConfig = containerDocConfig;
this.documentTypeManager = documentTypeManager;
DocprocService.schemaMap = schemaMap;
laterExecutor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
laterExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
if (chainsModel != null) {
prepareChainRegistry(chainRegistry, chainsModel, documentProcessorComponentRegistry);
for (Chain<DocumentProcessor> chain : chainRegistry.allComponents()) {
log.config("Setting up call stack for chain " + chain.getId());
DocprocService service = new DocprocService(chain.getId(), convertToCallStack(chain, statistics, metric), documentTypeManager, computeNumThreads(numThreads));
service.setInService(true);
docprocServiceRegistry.register(service.getId(), service);
}
}
}
/** Returns maxThreads when positive; otherwise falls back to the number of available processors. */
private static int computeNumThreads(int maxThreads) {
    if (maxThreads > 0) return maxThreads;
    return Runtime.getRuntime().availableProcessors();
}
public DocumentProcessingHandler(ComponentRegistry<DocprocService> docprocServiceRegistry,
ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
DocumentProcessingHandlerParameters params) {
this(docprocServiceRegistry, documentProcessorComponentRegistry, docFactoryRegistry,
params.getMaxNumThreads(),
params.getDocumentTypeManager(), params.getChainsModel(), params.getSchemaMap(),
params.getStatisticsManager(),
params.getMetric(),
params.getContainerDocConfig());
}
@Inject
public DocumentProcessingHandler(ComponentRegistry<DocumentProcessor> documentProcessorComponentRegistry,
ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry,
ChainsConfig chainsConfig,
SchemamappingConfig mappingConfig,
DocumentmanagerConfig docManConfig,
DocprocConfig docprocConfig,
ContainerMbusConfig containerMbusConfig,
ContainerDocumentConfig containerDocConfig,
Statistics manager,
Metric metric) {
this(new ComponentRegistry<>(),
documentProcessorComponentRegistry, docFactoryRegistry, new DocumentProcessingHandlerParameters().setMaxNumThreads
(docprocConfig.numthreads())
.setDocumentTypeManager(new DocumentTypeManager(docManConfig))
.setChainsModel(buildFromConfig(chainsConfig)).setSchemaMap(configureMapping(mappingConfig))
.setStatisticsManager(manager)
.setMetric(metric)
.setContainerDocumentConfig(containerDocConfig));
}
@Override
public ComponentRegistry<DocprocService> getDocprocServiceRegistry() {
return docprocServiceRegistry;
}
public ChainRegistry<DocumentProcessor> getChains() {
return chainRegistry;
}
private static SchemaMap configureMapping(SchemamappingConfig mappingConfig) {
SchemaMap map = new SchemaMap();
map.configure(mappingConfig);
return map;
}
private static CallStack convertToCallStack(Chain<DocumentProcessor> chain, Statistics statistics, Metric metric) {
CallStack stack = new CallStack(chain.getId().stringValue(), statistics, metric);
for (DocumentProcessor processor : chain.components()) {
processor.getFieldMap().putAll(DocprocService.schemaMap.chainMap(chain.getId().stringValue(), processor.getId().stringValue()));
stack.addLast(processor);
}
return stack;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
RequestContext requestContext;
if (request instanceof MbusRequest) {
requestContext = new MbusRequestContext((MbusRequest) request, handler, docprocServiceRegistry, docFactoryRegistry, containerDocConfig);
} else {
throw new IllegalArgumentException("Request type not supported: " + request);
}
if (!requestContext.isProcessable()) {
requestContext.skip();
return null;
}
String serviceName = requestContext.getServiceName();
DocprocService service = docprocServiceRegistry.getComponent(serviceName);
if (service == null) {
log.log(Level.SEVERE, "DocprocService for session '" + serviceName +
"' not found, returning request '" + requestContext + "'.");
requestContext.processingFailed(RequestContext.ErrorCode.ERROR_PROCESSING_FAILURE,
"DocprocService " + serviceName + " not found.");
return null;
} else if (service.getExecutor().getCallStack().size() == 0) {
requestContext.skip();
return null;
}
DocumentProcessingTask task = new DocumentProcessingTask(requestContext, this, service, service.getThreadPoolExecutor());
task.submit();
return null;
}
void submit(DocumentProcessingTask task, long delay) {
LaterTimerTask timerTask = new LaterTimerTask(task, delay);
laterExecutor.schedule(timerTask, delay, TimeUnit.MILLISECONDS);
}
/**
 * Timer task that re-submits a document processing task after it asked to be
 * retried later (Progress.LATER).
 */
private class LaterTimerTask extends TimerTask {

    private final DocumentProcessingTask processingTask;
    private final long delay; // kept only for the log messages

    private LaterTimerTask(DocumentProcessingTask processingTask, long delay) {
        this.delay = delay;
        // Supplier overload: the message is only built when FINE logging is actually enabled.
        log.log(Level.FINE, () -> "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask);
        this.processingTask = processingTask;
    }

    @Override
    public void run() {
        log.log(Level.FINE, () -> "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask);
        processingTask.submit();
    }
}
public DocumentTypeManager getDocumentTypeManager() {
return documentTypeManager;
}
} |
The returned `NodeMetrics` object is shared and thus may be read outside the lock whilst concurrently being updated by another thread. Is this a problem? Should we deep-copy the metrics to make sure they're internally consistent? This is not something introduced by this PR, but just a general observation. | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | selectedNode = new Node(entry, nodeMetrics); | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} |
That was why I made it package private. I checked all other usages; they were only in tests. | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | selectedNode = new Node(entry, nodeMetrics); | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} |
Done; I think the JIT compiler will take care of the performance for us. | private int getCachedIndex(String nodeName) {
Integer index = cachedIndex.get(nodeName);
if (index == null) {
index = getIndex(nodeName);
cachedIndex.put(nodeName, index);
}
return index;
} | } | private int getCachedIndex(String nodeName) {
return cachedIndex.computeIfAbsent(nodeName, key -> getIndex(key));
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.