comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
I added minSize = index + 1 to make it clearer. I wanted to keep the (CHILDLIST_SIZE_INCREMENTS - 1) as together with the division and the multiplication is an often used pattern for rounding of integers. | GroupListBuilder getOrCreateChildList(int tag, boolean ranked) {
int index = tag + 1;
if (childLists == null || index >= childLists.length) {
int reservedSize = (((index + 1) + (CHILDLIST_SIZE_INCREMENTS -1))/CHILDLIST_SIZE_INCREMENTS) * CHILDLIST_SIZE_INCREMENTS;
childLists = (childLists == null)
? new GroupListBuilder[reservedSize]
: Arrays.copyOf(childLists, reservedSize);
}
GroupListBuilder ret = childLists[index];
if (ret == null) {
ret = new GroupListBuilder(resultId.newChildId(childCount), tag, stable, ranked);
childLists[index] = ret;
childCount++;
}
return ret;
} | int reservedSize = (((index + 1) + (CHILDLIST_SIZE_INCREMENTS -1))/CHILDLIST_SIZE_INCREMENTS) * CHILDLIST_SIZE_INCREMENTS; | GroupListBuilder getOrCreateChildList(int tag, boolean ranked) {
int index = tag + 1;
if (childLists == null || index >= childLists.length) {
int minSize = index + 1;
int reservedSize = ((minSize + (CHILDLIST_SIZE_INCREMENTS - 1))/CHILDLIST_SIZE_INCREMENTS) * CHILDLIST_SIZE_INCREMENTS;
childLists = (childLists == null)
? new GroupListBuilder[reservedSize]
: Arrays.copyOf(childLists, reservedSize);
}
GroupListBuilder ret = childLists[index];
if (ret == null) {
ret = new GroupListBuilder(resultId.newChildId(childCount), tag, stable, ranked);
childLists[index] = ret;
childCount++;
}
return ret;
} | class GroupBuilder {
private static final int CHILDLIST_SIZE_INCREMENTS = 4;
boolean [] results = new boolean[8];
GroupListBuilder [] childLists;
int childCount = 0;
final ResultId resultId;
final com.yahoo.searchlib.aggregation.Group group;
final boolean stable;
GroupBuilder(ResultId resultId, com.yahoo.searchlib.aggregation.Group group, boolean stable) {
this.resultId = resultId;
this.group = group;
this.stable = stable;
}
Group build(double relevance) {
return fill(new Group(newGroupId(group), new Relevance(relevance)));
}
Group fill(Group group) {
for (AggregationResult result : this.group.getAggregationResults()) {
int tag = result.getTag();
if (result instanceof HitsAggregationResult) {
group.add(newHitList(group.size(), tag, (HitsAggregationResult)result));
} else {
String label = transform.getLabel(result.getTag());
if (label != null) {
group.setField(label, newResult(result, tag));
}
}
}
if (childLists != null) {
for (GroupListBuilder child : childLists) {
if (child != null) {
group.add(child.build());
}
}
}
return group;
}
void merge(com.yahoo.searchlib.aggregation.Group group) {
for (AggregationResult res : group.getAggregationResults()) {
int tag = res.getTag() + 1;
if (tag >= results.length) {
results = Arrays.copyOf(results, tag+8);
}
if ( ! results[tag] ) {
this.group.getAggregationResults().add(res);
results[tag] = true;
}
}
}
GroupId newGroupId(com.yahoo.searchlib.aggregation.Group execGroup) {
ResultNode res = execGroup.getId();
if (res instanceof FloatResultNode) {
return new DoubleId(res.getFloat());
} else if (res instanceof IntegerResultNode) {
return new LongId(res.getInteger());
} else if (res instanceof BoolResultNode) {
return new BoolId(((BoolResultNode)res).getValue());
} else if (res instanceof NullResultNode) {
return new NullId();
} else if (res instanceof RawResultNode) {
return new RawId(res.getRaw());
} else if (res instanceof StringResultNode) {
return new StringId(res.getString());
} else if (res instanceof FloatBucketResultNode) {
FloatBucketResultNode bucketId = (FloatBucketResultNode)res;
return new DoubleBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof IntegerBucketResultNode) {
IntegerBucketResultNode bucketId = (IntegerBucketResultNode)res;
return new LongBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof StringBucketResultNode) {
StringBucketResultNode bucketId = (StringBucketResultNode)res;
return new StringBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof RawBucketResultNode) {
RawBucketResultNode bucketId = (RawBucketResultNode)res;
return new RawBucketId(bucketId.getFrom(), bucketId.getTo());
} else {
throw new UnsupportedOperationException(res.getClass().getName());
}
}
Object newResult(ExpressionNode execResult, int tag) {
if (execResult instanceof AverageAggregationResult) {
return ((AverageAggregationResult)execResult).getAverage().getNumber();
} else if (execResult instanceof CountAggregationResult) {
return ((CountAggregationResult)execResult).getCount();
} else if (execResult instanceof ExpressionCountAggregationResult) {
long count = ((ExpressionCountAggregationResult)execResult).getEstimatedUniqueCount();
return correctExpressionCountEstimate(count, tag);
} else if (execResult instanceof MaxAggregationResult) {
return ((MaxAggregationResult)execResult).getMax().getValue();
} else if (execResult instanceof MinAggregationResult) {
return ((MinAggregationResult)execResult).getMin().getValue();
} else if (execResult instanceof SumAggregationResult) {
return ((SumAggregationResult) execResult).getSum().getValue();
} else if (execResult instanceof StandardDeviationAggregationResult) {
return ((StandardDeviationAggregationResult) execResult).getStandardDeviation();
} else if (execResult instanceof XorAggregationResult) {
return ((XorAggregationResult)execResult).getXor();
} else {
throw new UnsupportedOperationException(execResult.getClass().getName());
}
}
private long correctExpressionCountEstimate(long count, int tag) {
int actualGroupCount = group.getChildren().size();
if (actualGroupCount > 0 && count != actualGroupCount) {
if (transform.getMax(tag + 1) == 0 || transform.getMax(tag + 1) > actualGroupCount) {
return actualGroupCount;
}
}
return count;
}
HitList newHitList(int listIdx, int tag, HitsAggregationResult execResult) {
HitList hitList = new HitList(transform.getLabel(tag));
List<Hit> hits = execResult.getHits();
PageInfo page = new PageInfo(resultId.newChildId(listIdx), tag, stable, hits.size());
for (int i = page.firstEntry; i < page.lastEntry; ++i) {
hitList.add(hitConverter.toSearchHit(execResult.getSummaryClass(), hits.get(i)));
}
page.putContinuations(hitList.continuations());
return hitList;
}
} | class GroupBuilder {
private static final int CHILDLIST_SIZE_INCREMENTS = 4;
boolean [] results = new boolean[8];
GroupListBuilder [] childLists;
int childCount = 0;
final ResultId resultId;
final com.yahoo.searchlib.aggregation.Group group;
final boolean stable;
GroupBuilder(ResultId resultId, com.yahoo.searchlib.aggregation.Group group, boolean stable) {
this.resultId = resultId;
this.group = group;
this.stable = stable;
}
Group build(double relevance) {
return fill(new Group(newGroupId(group), new Relevance(relevance)));
}
Group fill(Group group) {
for (AggregationResult result : this.group.getAggregationResults()) {
int tag = result.getTag();
if (result instanceof HitsAggregationResult) {
group.add(newHitList(group.size(), tag, (HitsAggregationResult)result));
} else {
String label = transform.getLabel(result.getTag());
if (label != null) {
group.setField(label, newResult(result, tag));
}
}
}
if (childLists != null) {
for (GroupListBuilder child : childLists) {
if (child != null) {
group.add(child.build());
}
}
}
return group;
}
void merge(com.yahoo.searchlib.aggregation.Group group) {
for (AggregationResult res : group.getAggregationResults()) {
int tag = res.getTag() + 1;
if (tag >= results.length) {
results = Arrays.copyOf(results, tag+8);
}
if ( ! results[tag] ) {
this.group.getAggregationResults().add(res);
results[tag] = true;
}
}
}
GroupId newGroupId(com.yahoo.searchlib.aggregation.Group execGroup) {
ResultNode res = execGroup.getId();
if (res instanceof FloatResultNode) {
return new DoubleId(res.getFloat());
} else if (res instanceof IntegerResultNode) {
return new LongId(res.getInteger());
} else if (res instanceof BoolResultNode) {
return new BoolId(((BoolResultNode)res).getValue());
} else if (res instanceof NullResultNode) {
return new NullId();
} else if (res instanceof RawResultNode) {
return new RawId(res.getRaw());
} else if (res instanceof StringResultNode) {
return new StringId(res.getString());
} else if (res instanceof FloatBucketResultNode) {
FloatBucketResultNode bucketId = (FloatBucketResultNode)res;
return new DoubleBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof IntegerBucketResultNode) {
IntegerBucketResultNode bucketId = (IntegerBucketResultNode)res;
return new LongBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof StringBucketResultNode) {
StringBucketResultNode bucketId = (StringBucketResultNode)res;
return new StringBucketId(bucketId.getFrom(), bucketId.getTo());
} else if (res instanceof RawBucketResultNode) {
RawBucketResultNode bucketId = (RawBucketResultNode)res;
return new RawBucketId(bucketId.getFrom(), bucketId.getTo());
} else {
throw new UnsupportedOperationException(res.getClass().getName());
}
}
Object newResult(ExpressionNode execResult, int tag) {
if (execResult instanceof AverageAggregationResult) {
return ((AverageAggregationResult)execResult).getAverage().getNumber();
} else if (execResult instanceof CountAggregationResult) {
return ((CountAggregationResult)execResult).getCount();
} else if (execResult instanceof ExpressionCountAggregationResult) {
long count = ((ExpressionCountAggregationResult)execResult).getEstimatedUniqueCount();
return correctExpressionCountEstimate(count, tag);
} else if (execResult instanceof MaxAggregationResult) {
return ((MaxAggregationResult)execResult).getMax().getValue();
} else if (execResult instanceof MinAggregationResult) {
return ((MinAggregationResult)execResult).getMin().getValue();
} else if (execResult instanceof SumAggregationResult) {
return ((SumAggregationResult) execResult).getSum().getValue();
} else if (execResult instanceof StandardDeviationAggregationResult) {
return ((StandardDeviationAggregationResult) execResult).getStandardDeviation();
} else if (execResult instanceof XorAggregationResult) {
return ((XorAggregationResult)execResult).getXor();
} else {
throw new UnsupportedOperationException(execResult.getClass().getName());
}
}
private long correctExpressionCountEstimate(long count, int tag) {
int actualGroupCount = group.getChildren().size();
if (actualGroupCount > 0 && count != actualGroupCount) {
if (transform.getMax(tag + 1) == 0 || transform.getMax(tag + 1) > actualGroupCount) {
return actualGroupCount;
}
}
return count;
}
HitList newHitList(int listIdx, int tag, HitsAggregationResult execResult) {
HitList hitList = new HitList(transform.getLabel(tag));
List<Hit> hits = execResult.getHits();
PageInfo page = new PageInfo(resultId.newChildId(listIdx), tag, stable, hits.size());
for (int i = page.firstEntry; i < page.lastEntry; ++i) {
hitList.add(hitConverter.toSearchHit(execResult.getSummaryClass(), hits.get(i)));
}
page.putContinuations(hitList.continuations());
return hitList;
}
} |
This is a bit of a hack and should ideally be decided on the caller side. | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
if (!copy.isEmpty()) {
Event last = copy.get(copy.size() - 1);
if (last.type() == event.type()) {
copy.remove(last);
}
}
copy.add(event);
return new History(copy);
} | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
copy.add(event);
return new History(copy);
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
public History(List<Event> events) {
this(events, MAX_SIZE);
}
History(List<Event> events, int maxSize) {
this.events = Objects.requireNonNull(events, "events must be non-null")
.stream()
.sorted(Comparator.comparing(Event::at))
.skip(Math.max(events.size() - maxSize, 0))
.collect(Collectors.toUnmodifiableList());
}
/** Returns the latest event of given type, if it is present in this history */
public Optional<Event> event(Event.Type type) {
return events.stream().filter(event -> event.type() == type).max(Comparator.comparing(Event::at));
}
/** Returns true if a given event is registered in this history at the given time */
public boolean hasEventAt(Event.Type type, Instant time) {
return event(type).map(event -> event.at().equals(time))
.orElse(false);
}
/** Returns true if a given event is registered in this history after the given time */
public boolean hasEventAfter(Event.Type type, Instant time) {
return event(type).map(event -> event.at().isAfter(time))
.orElse(false);
}
/** Returns true if a given event is registered in this history before the given time */
public boolean hasEventBefore(Event.Type type, Instant time) {
return event(type).map(event -> event.at().isBefore(time))
.orElse(false);
}
public List<Event> asList() {
return events;
}
/** Returns a copy of this history with the given event added */
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
if (from == to && from != Node.State.reserved) return this;
switch (to) {
case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
case active: return this.with(new Event(Event.Type.activated, agent, at));
case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
case reserved: return this.with(new Event(Event.Type.reserved, agent, at));
case failed: return this.with(new Event(Event.Type.failed, agent, at));
case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
case parked: return this.with(new Event(Event.Type.parked, agent, at));
case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
default: return this;
}
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {
private final Instant at;
private final Agent agent;
private final Type type;
public Event(Event.Type type, Agent agent, Instant at) {
this.type = type;
this.agent = agent;
this.at = at;
}
public enum Type {
activated,
breakfixed(false),
deactivated,
deallocated,
deprovisioned(false),
failed(false),
parked,
provisioned(false),
readied,
reserved,
/** The node was scheduled for retirement (hard) */
wantToRetire(false),
/** The node was scheduled for retirement (soft) */
preferToRetire(false),
/** This node was scheduled for failing */
wantToFail,
/** The active node was retired */
retired,
/** The active node went down according to the service monitor */
down,
/** The active node came up according to the service monitor */
up,
/** The node made a config request, indicating it is live */
requested,
/** The node resources/flavor were changed */
resized(false),
/** The node was rebooted */
rebooted(false),
/** The node upgraded its OS (implies a reboot) */
osUpgraded(false),
/** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
firmwareVerified(false);
private final boolean applicationLevel;
/** Creates an application level event */
Type() {
this.applicationLevel = true;
}
Type(boolean applicationLevel) {
this.applicationLevel = applicationLevel;
}
/** Returns true if this is an application level event and false it it is a node level event */
public boolean isApplicationLevel() { return applicationLevel; }
}
/** Returns the type of event */
public Event.Type type() { return type; }
/** Returns the agent causing this event */
public Agent agent() { return agent; }
/** Returns the instant this even took place */
public Instant at() { return at; }
@Override
public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
}
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
public History(List<Event> events) {
this(events, MAX_SIZE);
}
History(List<Event> events, int maxSize) {
this.events = Objects.requireNonNull(events, "events must be non-null")
.stream()
.sorted(Comparator.comparing(Event::at))
.skip(Math.max(events.size() - maxSize, 0))
.collect(Collectors.toUnmodifiableList());
}
/** Returns the last event of given type, if it is present in this history */
public Optional<Event> lastEvent(Event.Type type) {
return events.stream().filter(event -> event.type() == type).max(Comparator.comparing(Event::at));
}
/** Returns true if the last event of this type is registered in this history at the given time */
public boolean hasLastEventAt(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().equals(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered after the given time */
public boolean hasLastEventAfter(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isAfter(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered before the given time */
public boolean hasLastEventBefore(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isBefore(time))
.orElse(false);
}
public List<Event> asList() {
return events;
}
/** Returns a copy of this history with the given event added */
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
if (from == to && from != Node.State.reserved) return this;
switch (to) {
case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
case active: return this.with(new Event(Event.Type.activated, agent, at));
case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
case reserved: {
History history = this;
if (!events.isEmpty() && events.get(events.size() - 1).type() == Event.Type.reserved) {
history = new History(events.subList(0, events.size() - 1));
}
return history.with(new Event(Event.Type.reserved, agent, at));
}
case failed: return this.with(new Event(Event.Type.failed, agent, at));
case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
case parked: return this.with(new Event(Event.Type.parked, agent, at));
case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
default: return this;
}
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {
private final Instant at;
private final Agent agent;
private final Type type;
public Event(Event.Type type, Agent agent, Instant at) {
this.type = type;
this.agent = agent;
this.at = at;
}
public enum Type {
activated,
breakfixed(false),
deactivated,
deallocated,
deprovisioned(false),
failed(false),
parked,
provisioned(false),
readied,
reserved,
/** The node was scheduled for retirement (hard) */
wantToRetire(false),
/** The node was scheduled for retirement (soft) */
preferToRetire(false),
/** This node was scheduled for failing */
wantToFail,
/** The active node was retired */
retired,
/** The active node went down according to the service monitor */
down,
/** The active node came up according to the service monitor */
up,
/** The node made a config request, indicating it is live */
requested,
/** The node resources/flavor were changed */
resized(false),
/** The node was rebooted */
rebooted(false),
/** The node upgraded its OS (implies a reboot) */
osUpgraded(false),
/** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
firmwareVerified(false);
private final boolean applicationLevel;
/** Creates an application level event */
Type() {
this.applicationLevel = true;
}
Type(boolean applicationLevel) {
this.applicationLevel = applicationLevel;
}
/** Returns true if this is an application level event and false it it is a node level event */
public boolean isApplicationLevel() { return applicationLevel; }
}
/** Returns the type of event */
public Event.Type type() { return type; }
/** Returns the agent causing this event */
public Agent agent() { return agent; }
/** Returns the instant this even took place */
public Instant at() { return at; }
@Override
public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
}
} | |
Not sure I agree. If some other code misbehaves and adds the same event multiple times, do we want that to push all other events out of the history? I suppose we could add a counter to the event if knowing about repeating ones is important. | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
if (!copy.isEmpty()) {
Event last = copy.get(copy.size() - 1);
if (last.type() == event.type()) {
copy.remove(last);
}
}
copy.add(event);
return new History(copy);
} | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
copy.add(event);
return new History(copy);
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
public History(List<Event> events) {
this(events, MAX_SIZE);
}
History(List<Event> events, int maxSize) {
this.events = Objects.requireNonNull(events, "events must be non-null")
.stream()
.sorted(Comparator.comparing(Event::at))
.skip(Math.max(events.size() - maxSize, 0))
.collect(Collectors.toUnmodifiableList());
}
/** Returns the latest event of given type, if it is present in this history */
public Optional<Event> event(Event.Type type) {
return events.stream().filter(event -> event.type() == type).max(Comparator.comparing(Event::at));
}
/** Returns true if a given event is registered in this history at the given time */
public boolean hasEventAt(Event.Type type, Instant time) {
return event(type).map(event -> event.at().equals(time))
.orElse(false);
}
/** Returns true if a given event is registered in this history after the given time */
public boolean hasEventAfter(Event.Type type, Instant time) {
return event(type).map(event -> event.at().isAfter(time))
.orElse(false);
}
/** Returns true if a given event is registered in this history before the given time */
public boolean hasEventBefore(Event.Type type, Instant time) {
return event(type).map(event -> event.at().isBefore(time))
.orElse(false);
}
public List<Event> asList() {
return events;
}
/** Returns a copy of this history with the given event added */
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
if (from == to && from != Node.State.reserved) return this;
switch (to) {
case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
case active: return this.with(new Event(Event.Type.activated, agent, at));
case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
case reserved: return this.with(new Event(Event.Type.reserved, agent, at));
case failed: return this.with(new Event(Event.Type.failed, agent, at));
case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
case parked: return this.with(new Event(Event.Type.parked, agent, at));
case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
default: return this;
}
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {
private final Instant at;
private final Agent agent;
private final Type type;
public Event(Event.Type type, Agent agent, Instant at) {
this.type = type;
this.agent = agent;
this.at = at;
}
public enum Type {
activated,
breakfixed(false),
deactivated,
deallocated,
deprovisioned(false),
failed(false),
parked,
provisioned(false),
readied,
reserved,
/** The node was scheduled for retirement (hard) */
wantToRetire(false),
/** The node was scheduled for retirement (soft) */
preferToRetire(false),
/** This node was scheduled for failing */
wantToFail,
/** The active node was retired */
retired,
/** The active node went down according to the service monitor */
down,
/** The active node came up according to the service monitor */
up,
/** The node made a config request, indicating it is live */
requested,
/** The node resources/flavor were changed */
resized(false),
/** The node was rebooted */
rebooted(false),
/** The node upgraded its OS (implies a reboot) */
osUpgraded(false),
/** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
firmwareVerified(false);
private final boolean applicationLevel;
/** Creates an application level event */
Type() {
this.applicationLevel = true;
}
Type(boolean applicationLevel) {
this.applicationLevel = applicationLevel;
}
/** Returns true if this is an application level event and false it it is a node level event */
public boolean isApplicationLevel() { return applicationLevel; }
}
/** Returns the type of event */
public Event.Type type() { return type; }
/** Returns the agent causing this event */
public Agent agent() { return agent; }
/** Returns the instant this even took place */
public Instant at() { return at; }
@Override
public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
}
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
public History(List<Event> events) {
this(events, MAX_SIZE);
}
History(List<Event> events, int maxSize) {
this.events = Objects.requireNonNull(events, "events must be non-null")
.stream()
.sorted(Comparator.comparing(Event::at))
.skip(Math.max(events.size() - maxSize, 0))
.collect(Collectors.toUnmodifiableList());
}
/** Returns the last event of given type, if it is present in this history */
public Optional<Event> lastEvent(Event.Type type) {
return events.stream().filter(event -> event.type() == type).max(Comparator.comparing(Event::at));
}
/** Returns true if the last event of this type is registered in this history at the given time */
public boolean hasLastEventAt(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().equals(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered after the given time */
public boolean hasLastEventAfter(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isAfter(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered before the given time */
public boolean hasLastEventBefore(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isBefore(time))
.orElse(false);
}
public List<Event> asList() {
return events;
}
/** Returns a copy of this history with the given event added */
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
if (from == to && from != Node.State.reserved) return this;
switch (to) {
case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
case active: return this.with(new Event(Event.Type.activated, agent, at));
case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
case reserved: {
History history = this;
if (!events.isEmpty() && events.get(events.size() - 1).type() == Event.Type.reserved) {
history = new History(events.subList(0, events.size() - 1));
}
return history.with(new Event(Event.Type.reserved, agent, at));
}
case failed: return this.with(new Event(Event.Type.failed, agent, at));
case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
case parked: return this.with(new Event(Event.Type.parked, agent, at));
case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
default: return this;
}
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {

    private final Instant at;    // when this event took place
    private final Agent agent;   // who caused this event
    private final Type type;

    /** Creates an event of the given type, caused by the given agent at the given instant. */
    public Event(Event.Type type, Agent agent, Instant at) {
        this.type = type;
        this.agent = agent;
        this.at = at;
    }

    /**
     * The kind of event. Constants created with the no-arg constructor are application-level
     * events; constants created with {@code (false)} are node-level events.
     */
    public enum Type {
        activated,
        breakfixed(false),
        deactivated,
        deallocated,
        deprovisioned(false),
        failed(false),
        parked,
        provisioned(false),
        readied,
        reserved,
        /** The node was scheduled for retirement (hard) */
        wantToRetire(false),
        /** The node was scheduled for retirement (soft) */
        preferToRetire(false),
        /** This node was scheduled for failing */
        wantToFail,
        /** The active node was retired */
        retired,
        /** The active node went down according to the service monitor */
        down,
        /** The active node came up according to the service monitor */
        up,
        /** The node made a config request, indicating it is live */
        requested,
        /** The node resources/flavor were changed */
        resized(false),
        /** The node was rebooted */
        rebooted(false),
        /** The node upgraded its OS (implies a reboot) */
        osUpgraded(false),
        /** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
        firmwareVerified(false);

        // true for application-level events, false for node-level events
        private final boolean applicationLevel;

        /** Creates an application level event */
        Type() {
            this.applicationLevel = true;
        }

        Type(boolean applicationLevel) {
            this.applicationLevel = applicationLevel;
        }

        /** Returns true if this is an application level event, and false if it is a node level event */
        public boolean isApplicationLevel() { return applicationLevel; }

    }

    /** Returns the type of event */
    public Event.Type type() { return type; }

    /** Returns the agent causing this event */
    public Agent agent() { return agent; }

    /** Returns the instant this event took place */
    public Instant at() { return at; }

    @Override
    public String toString() { return "'" + type + "' event at " + at + " by " + agent; }

}
} | |
We could have some code that added a pair of events and pushed everything out too; should we protect against that as well? :-) "The last event of each type" and "the last 15 events" are simple and well-defined. Future devs will have some simple model like that in their head, whereas the real behavior here is, or will become, more complicated — that is what I mean. Anyway, feel free to merge if you prefer; we can address it when it becomes a problem. | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
if (!copy.isEmpty()) {
Event last = copy.get(copy.size() - 1);
if (last.type() == event.type()) {
copy.remove(last);
}
}
copy.add(event);
return new History(copy);
} | public History with(Event event) {
List<Event> copy = new ArrayList<>(events);
copy.add(event);
return new History(copy);
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
/** Creates a history of the given events, keeping at most MAX_SIZE of the most recent ones. */
public History(List<Event> events) {
    this(events, MAX_SIZE);
}
/**
 * Creates a history from the given events, retaining at most maxSize of them.
 * Events are sorted chronologically; when there are more than maxSize events
 * the oldest ones are dropped. The stored list is unmodifiable.
 */
History(List<Event> events, int maxSize) {
    this.events = Objects.requireNonNull(events, "events must be non-null")
                         .stream()
                         .sorted(Comparator.comparing(Event::at))
                         .skip(Math.max(events.size() - maxSize, 0))
                         .collect(Collectors.toUnmodifiableList());
}
/** Returns the latest event of given type, if it is present in this history */
public Optional<Event> event(Event.Type type) {
    Optional<Event> latest = Optional.empty();
    for (Event candidate : events) {
        if (candidate.type() != type) continue;
        // Strictly-greater comparison keeps the first of any equal-timed events,
        // matching the tie-breaking of Stream.max with this comparator
        if (latest.isEmpty() || candidate.at().compareTo(latest.get().at()) > 0)
            latest = Optional.of(candidate);
    }
    return latest;
}
/** Returns true if a given event is registered in this history at the given time */
public boolean hasEventAt(Event.Type type, Instant time) {
    return event(type).filter(event -> event.at().equals(time)).isPresent();
}
/** Returns true if a given event is registered in this history after the given time */
public boolean hasEventAfter(Event.Type type, Instant time) {
    return event(type).filter(event -> event.at().isAfter(time)).isPresent();
}
/** Returns true if a given event is registered in this history before the given time */
public boolean hasEventBefore(Event.Type type, Instant time) {
    return event(type).filter(event -> event.at().isBefore(time)).isPresent();
}
/** Returns the events of this history, oldest first, as an unmodifiable list. */
public List<Event> asList() {
    return events;
}
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
    // Re-entering the same state records nothing, except for reserved,
    // where a repeated reservation is recorded anew
    if (from == to && from != Node.State.reserved) return this;
    switch (to) {
        case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
        case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
        // Becoming ready also wipes all application-level events from the history
        case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
        case active: return this.with(new Event(Event.Type.activated, agent, at));
        case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
        case reserved: return this.with(new Event(Event.Type.reserved, agent, at));
        case failed: return this.with(new Event(Event.Type.failed, agent, at));
        case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
        case parked: return this.with(new Event(Event.Type.parked, agent, at));
        case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
        // Transitions to any other state leave the history unchanged
        default: return this;
    }
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {
private final Instant at;
private final Agent agent;
private final Type type;
public Event(Event.Type type, Agent agent, Instant at) {
this.type = type;
this.agent = agent;
this.at = at;
}
public enum Type {
activated,
breakfixed(false),
deactivated,
deallocated,
deprovisioned(false),
failed(false),
parked,
provisioned(false),
readied,
reserved,
/** The node was scheduled for retirement (hard) */
wantToRetire(false),
/** The node was scheduled for retirement (soft) */
preferToRetire(false),
/** This node was scheduled for failing */
wantToFail,
/** The active node was retired */
retired,
/** The active node went down according to the service monitor */
down,
/** The active node came up according to the service monitor */
up,
/** The node made a config request, indicating it is live */
requested,
/** The node resources/flavor were changed */
resized(false),
/** The node was rebooted */
rebooted(false),
/** The node upgraded its OS (implies a reboot) */
osUpgraded(false),
/** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
firmwareVerified(false);
private final boolean applicationLevel;
/** Creates an application level event */
Type() {
this.applicationLevel = true;
}
Type(boolean applicationLevel) {
this.applicationLevel = applicationLevel;
}
/** Returns true if this is an application level event and false it it is a node level event */
public boolean isApplicationLevel() { return applicationLevel; }
}
/** Returns the type of event */
public Event.Type type() { return type; }
/** Returns the agent causing this event */
public Agent agent() { return agent; }
/** Returns the instant this even took place */
public Instant at() { return at; }
@Override
public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
}
} | class History {
/** The maximum number of events to keep for a node */
private static final int MAX_SIZE = 15;
private final List<Event> events;
public History(List<Event> events) {
this(events, MAX_SIZE);
}
History(List<Event> events, int maxSize) {
this.events = Objects.requireNonNull(events, "events must be non-null")
.stream()
.sorted(Comparator.comparing(Event::at))
.skip(Math.max(events.size() - maxSize, 0))
.collect(Collectors.toUnmodifiableList());
}
/** Returns the last event of given type, if it is present in this history */
public Optional<Event> lastEvent(Event.Type type) {
return events.stream().filter(event -> event.type() == type).max(Comparator.comparing(Event::at));
}
/** Returns true if the last event of this type is registered in this history at the given time */
public boolean hasLastEventAt(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().equals(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered after the given time */
public boolean hasLastEventAfter(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isAfter(time))
.orElse(false);
}
/** Returns true if the last event of this type is registered before the given time */
public boolean hasLastEventBefore(Instant time, Event.Type type) {
return lastEvent(type).map(event -> event.at().isBefore(time))
.orElse(false);
}
public List<Event> asList() {
return events;
}
/** Returns a copy of this history with the given event added */
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
if (from == to && from != Node.State.reserved) return this;
switch (to) {
case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
case deprovisioned: return this.with(new Event(Event.Type.deprovisioned, agent, at));
case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
case active: return this.with(new Event(Event.Type.activated, agent, at));
case inactive: return this.with(new Event(Event.Type.deactivated, agent, at));
case reserved: {
History history = this;
if (!events.isEmpty() && events.get(events.size() - 1).type() == Event.Type.reserved) {
history = new History(events.subList(0, events.size() - 1));
}
return history.with(new Event(Event.Type.reserved, agent, at));
}
case failed: return this.with(new Event(Event.Type.failed, agent, at));
case dirty: return this.with(new Event(Event.Type.deallocated, agent, at));
case parked: return this.with(new Event(Event.Type.parked, agent, at));
case breakfixed: return this.with(new Event(Event.Type.breakfixed, agent, at));
default: return this;
}
}
/**
* Events can be application or node level.
* This returns a copy of this history with all application level events removed.
*/
private History withoutApplicationEvents() {
return new History(asList().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
}
/** Returns the empty history */
public static History empty() { return new History(List.of()); }
@Override
public String toString() {
if (events.isEmpty()) return "history: (empty)";
StringBuilder b = new StringBuilder("history: ");
for (Event e : events)
b.append(e).append(", ");
b.setLength(b.length() - 2);
return b.toString();
}
/** An event which may happen to a node */
public static class Event {
private final Instant at;
private final Agent agent;
private final Type type;
public Event(Event.Type type, Agent agent, Instant at) {
this.type = type;
this.agent = agent;
this.at = at;
}
public enum Type {
activated,
breakfixed(false),
deactivated,
deallocated,
deprovisioned(false),
failed(false),
parked,
provisioned(false),
readied,
reserved,
/** The node was scheduled for retirement (hard) */
wantToRetire(false),
/** The node was scheduled for retirement (soft) */
preferToRetire(false),
/** This node was scheduled for failing */
wantToFail,
/** The active node was retired */
retired,
/** The active node went down according to the service monitor */
down,
/** The active node came up according to the service monitor */
up,
/** The node made a config request, indicating it is live */
requested,
/** The node resources/flavor were changed */
resized(false),
/** The node was rebooted */
rebooted(false),
/** The node upgraded its OS (implies a reboot) */
osUpgraded(false),
/** The node verified its firmware (whether this resulted in a reboot depends on the node model) */
firmwareVerified(false);
private final boolean applicationLevel;
/** Creates an application level event */
Type() {
this.applicationLevel = true;
}
Type(boolean applicationLevel) {
this.applicationLevel = applicationLevel;
}
/** Returns true if this is an application level event and false it it is a node level event */
public boolean isApplicationLevel() { return applicationLevel; }
}
/** Returns the type of event */
public Event.Type type() { return type; }
/** Returns the agent causing this event */
public Agent agent() { return agent; }
/** Returns the instant this even took place */
public Instant at() { return at; }
@Override
public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
}
} | |
Don't need regex for this, just use `String::replace` (also replaces all) | public void render(OutputStream outputStream) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
wrapped.render(buffer);
outputStream.write(buffer.toString(Charset.forName(wrapped.getCharacterEncoding()))
.replaceAll(patten, replacement)
.getBytes(UTF_8));
} | .replaceAll(patten, replacement) | public void render(OutputStream outputStream) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
wrapped.render(buffer);
outputStream.write(buffer.toString(Charset.forName(wrapped.getCharacterEncoding()))
.replace(patten, replacement)
.getBytes(UTF_8));
} | class UrlRewritingProxyResponse extends HttpResponse {
final HttpResponse wrapped;
final String patten;
final String replacement;
/**
 * Wraps a response so that occurrences of the request URL are rewritten to the forwarded URL.
 * Both URLs are normalized (trailing slash and query removed) before use.
 * NOTE(review): the field name 'patten' looks like a typo for 'pattern' — it is declared
 * elsewhere in this class, so confirm all usages before renaming.
 */
public UrlRewritingProxyResponse(HttpResponse wrapped, HttpURL requestUrl, HttpURL forwardedUrl) {
    super(wrapped.getStatus());
    this.wrapped = wrapped;
    // Pattern.quote makes the request URL a regex literal, since it is later used as a regex pattern
    this.patten = Pattern.quote(requestUrl.withPath(requestUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString());
    this.replacement = forwardedUrl.withPath(forwardedUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
}
/** Delegates the content type to the wrapped response. */
// Fix: the annotation was duplicated; @Override is not repeatable, so a second
// occurrence is a compile error (likely left behind when a method between them was removed).
@Override
public String getContentType() {
    return wrapped.getContentType();
}
} | class UrlRewritingProxyResponse extends HttpResponse {
final HttpResponse wrapped;
final String patten;
final String replacement;
/**
 * Wraps a response so that occurrences of the request URL are replaced with the forwarded URL.
 * Both URLs are normalized (trailing slash and query removed) before use; the search string
 * is used literally, not as a regex.
 * NOTE(review): the field name 'patten' looks like a typo for 'pattern' — it is declared
 * elsewhere in this class, so confirm all usages before renaming.
 */
public UrlRewritingProxyResponse(HttpResponse wrapped, HttpURL requestUrl, HttpURL forwardedUrl) {
    super(wrapped.getStatus());
    this.wrapped = wrapped;
    this.patten = requestUrl.withPath(requestUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
    this.replacement = forwardedUrl.withPath(forwardedUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
}
/** Delegates the content type to the wrapped response. */
// Fix: the annotation was duplicated; @Override is not repeatable, so a second
// occurrence is a compile error (likely left behind when a method between them was removed).
@Override
public String getContentType() {
    return wrapped.getContentType();
}
} |
Nice! | public void render(OutputStream outputStream) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
wrapped.render(buffer);
outputStream.write(buffer.toString(Charset.forName(wrapped.getCharacterEncoding()))
.replaceAll(patten, replacement)
.getBytes(UTF_8));
} | .replaceAll(patten, replacement) | public void render(OutputStream outputStream) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
wrapped.render(buffer);
outputStream.write(buffer.toString(Charset.forName(wrapped.getCharacterEncoding()))
.replace(patten, replacement)
.getBytes(UTF_8));
} | class UrlRewritingProxyResponse extends HttpResponse {
final HttpResponse wrapped;
final String patten;
final String replacement;
public UrlRewritingProxyResponse(HttpResponse wrapped, HttpURL requestUrl, HttpURL forwardedUrl) {
super(wrapped.getStatus());
this.wrapped = wrapped;
this.patten = Pattern.quote(requestUrl.withPath(requestUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString());
this.replacement = forwardedUrl.withPath(forwardedUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
}
/** Delegates the content type to the wrapped response. */
// Fix: the annotation was duplicated; @Override is not repeatable, so a second
// occurrence is a compile error (likely left behind when a method between them was removed).
@Override
public String getContentType() {
    return wrapped.getContentType();
}
} | class UrlRewritingProxyResponse extends HttpResponse {
final HttpResponse wrapped;
final String patten;
final String replacement;
public UrlRewritingProxyResponse(HttpResponse wrapped, HttpURL requestUrl, HttpURL forwardedUrl) {
super(wrapped.getStatus());
this.wrapped = wrapped;
this.patten = requestUrl.withPath(requestUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
this.replacement = forwardedUrl.withPath(forwardedUrl.path().withoutTrailingSlash()).withQuery(Query.empty()).asURI().toString();
}
/** Delegates the content type to the wrapped response. */
// Fix: the annotation was duplicated; @Override is not repeatable, so a second
// occurrence is a compile error (likely left behind when a method between them was removed).
@Override
public String getContentType() {
    return wrapped.getContentType();
}
} |
```suggestion return !history().hasEventAfter(History.Event.Type.up, downAt.get()); ``` | public boolean isDown() {
Optional<Instant> downAt = history().event(History.Event.Type.down).map(History.Event::at);
if (downAt.isEmpty()) return false;
Optional<Instant> upAt = history().event(History.Event.Type.up).map(History.Event::at);
if (upAt.isEmpty()) return true;
return !downAt.get().isBefore(upAt.get());
} | return !downAt.get().isBefore(upAt.get()); | public boolean isDown() {
Optional<Instant> downAt = history().event(History.Event.Type.down).map(History.Event::at);
if (downAt.isEmpty()) return false;
return !history().hasEventAfter(History.Event.Type.up, downAt.get());
} | class Node implements Nodelike {
private final String hostname;
private final IP.Config ipConfig;
private final String id;
private final Optional<String> parentHostname;
private final Flavor flavor;
private final Status status;
private final State state;
private final NodeType type;
private final Reports reports;
private final Optional<String> modelName;
private final Optional<TenantName> reservedTo;
private final Optional<ApplicationId> exclusiveToApplicationId;
private final Optional<ClusterSpec.Type> exclusiveToClusterType;
private final Optional<String> switchHostname;
private final List<TrustStoreItem> trustStoreItems;
/** Record of the last event of each type happening to this node */
private final History history;
/** The current allocation of this node, if any */
private final Optional<Allocation> allocation;
/** Creates a node builder in the initial state (reserved) */
public static Node.Builder reserve(Set<String> ipAddresses, String hostname, String parentHostname, NodeResources resources, NodeType type) {
return new Node.Builder(UUID.randomUUID().toString(), hostname, new Flavor(resources), State.reserved, type)
.ipConfig(IP.Config.ofEmptyPool(ipAddresses))
.parentHostname(parentHostname);
}
/** Creates a node builder in the initial state (provisioned) */
public static Node.Builder create(String id, IP.Config ipConfig, String hostname, Flavor flavor, NodeType type) {
return new Node.Builder(id, hostname, flavor, State.provisioned, type).ipConfig(ipConfig);
}
/** Creates a node builder */
public static Node.Builder create(String id, String hostname, Flavor flavor, Node.State state, NodeType type) {
return new Node.Builder(id, hostname, flavor, state, type);
}
/** DO NOT USE: public for serialization purposes. See {@code create} helper methods. */
public Node(String id, IP.Config ipConfig, String hostname, Optional<String> parentHostname,
Flavor flavor, Status status, State state, Optional<Allocation> allocation, History history, NodeType type,
Reports reports, Optional<String> modelName, Optional<TenantName> reservedTo,
Optional<ApplicationId> exclusiveToApplicationId, Optional<ClusterSpec.Type> exclusiveToClusterType,
Optional<String> switchHostname, List<TrustStoreItem> trustStoreItems) {
this.id = Objects.requireNonNull(id, "A node must have an ID");
this.hostname = requireNonEmptyString(hostname, "A node must have a hostname");
this.ipConfig = Objects.requireNonNull(ipConfig, "A node must a have an IP config");
this.parentHostname = requireNonEmptyString(parentHostname, "A parent host name must be a proper value");
this.flavor = Objects.requireNonNull(flavor, "A node must have a flavor");
this.status = Objects.requireNonNull(status, "A node must have a status");
this.state = Objects.requireNonNull(state, "A null node state is not permitted");
this.allocation = Objects.requireNonNull(allocation, "A null node allocation is not permitted");
this.history = Objects.requireNonNull(history, "A null node history is not permitted");
this.type = Objects.requireNonNull(type, "A null node type is not permitted");
this.reports = Objects.requireNonNull(reports, "A null reports is not permitted");
this.modelName = Objects.requireNonNull(modelName, "A null modelName is not permitted");
this.reservedTo = Objects.requireNonNull(reservedTo, "reservedTo cannot be null");
this.exclusiveToApplicationId = Objects.requireNonNull(exclusiveToApplicationId, "exclusiveToApplicationId cannot be null");
this.exclusiveToClusterType = Objects.requireNonNull(exclusiveToClusterType, "exclusiveToClusterType cannot be null");
this.switchHostname = requireNonEmptyString(switchHostname, "switchHostname cannot be null");
this.trustStoreItems = trustStoreItems.stream().distinct().collect(Collectors.toUnmodifiableList());
if (state == State.active)
requireNonEmpty(ipConfig.primary(), "Active node " + hostname + " must have at least one valid IP address");
if (state == State.ready && type.isHost()) {
requireNonEmpty(ipConfig.primary(), "A " + type + " must have at least one primary IP address in state " + state);
requireNonEmpty(ipConfig.pool().ipSet(), "A " + type + " must have a non-empty IP address pool in state " + state);
}
if (parentHostname.isPresent()) {
if (!ipConfig.pool().ipSet().isEmpty()) throw new IllegalArgumentException("A child node cannot have an IP address pool");
if (modelName.isPresent()) throw new IllegalArgumentException("A child node cannot have model name set");
if (switchHostname.isPresent()) throw new IllegalArgumentException("A child node cannot have switch hostname set");
}
if (type != NodeType.host && reservedTo.isPresent())
throw new IllegalArgumentException("Only tenant hosts can be reserved to a tenant");
if (type != NodeType.host && exclusiveToApplicationId.isPresent())
throw new IllegalArgumentException("Only tenant hosts can be exclusive to an application");
if (type != NodeType.host && exclusiveToClusterType.isPresent())
throw new IllegalArgumentException("Only tenant hosts can be exclusive to a cluster type");
}
/** Returns the IP config of this node */
public IP.Config ipConfig() { return ipConfig; }
/** Returns the host name of this node */
public String hostname() { return hostname; }
/**
* Unique identifier for this node. Code should not depend on this as its main purpose is to aid human operators in
* mapping a node to the corresponding cloud instance. No particular format is enforced.
*
* Formats used vary between the underlying cloud providers:
*
* - OpenStack: UUID
* - AWS: Instance ID
* - Linux containers: UUID
*/
public String id() { return id; }
@Override
public Optional<String> parentHostname() { return parentHostname; }
/** Returns whether the given hostname is registered as this node's parent host. */
public boolean hasParent(String hostname) {
    return parentHostname.map(parent -> parent.equals(hostname)).orElse(false);
}
@Override
public NodeResources resources() { return flavor.resources(); }
/** Returns the flavor of this node */
public Flavor flavor() { return flavor; }
/** Returns the known information about the node's ephemeral status */
public Status status() { return status; }
/** Returns the current state of this node (in the node state machine) */
public State state() { return state; }
@Override
public NodeType type() { return type; }
/** Returns the current allocation of this, if any */
public Optional<Allocation> allocation() { return allocation; }
/**
 * Returns the current allocation when it must exist.
 *
 * @param message context to prepend to the failure message
 * @throws IllegalStateException if this node is unallocated
 */
private Allocation requireAllocation(String message) {
    // orElseThrow replaces the manual isPresent()/get() dance and the redundant
    // local variable shadowing the field; exception type and message are unchanged
    return allocation.orElseThrow(() ->
            new IllegalStateException(message + " for " + hostname() + ": The node is unallocated"));
}
/** Returns a history of the last events happening to this node */
public History history() { return history; }
/** Returns all the reports on this node. */
public Reports reports() { return reports; }
/** Returns the hardware model of this node, if any */
public Optional<String> modelName() { return modelName; }
/**
* Returns the tenant this node is reserved to, if any. Only hosts can be reserved to a tenant.
* If this is set, resources on this host cannot be allocated to any other tenant
*/
public Optional<TenantName> reservedTo() { return reservedTo; }
/**
* Returns the application this host is exclusive to, if any. Only tenant hosts can be exclusive to an application.
* If this is set, resources on this host cannot be allocated to any other application. This is set during
* provisioning and applies for the entire lifetime of the host
*/
public Optional<ApplicationId> exclusiveToApplicationId() { return exclusiveToApplicationId; }
/**
* Returns the cluster type this host is exclusive to, if any. Only tenant hosts can be exclusive to a cluster type.
* If this is set, resources on this host cannot be allocated to any other cluster type. This is set during
* provisioning and applies for the entire lifetime of the host
*/
public Optional<ClusterSpec.Type> exclusiveToClusterType() { return exclusiveToClusterType; }
/** Returns the hostname of the switch this node is connected to, if any */
public Optional<String> switchHostname() {
return switchHostname;
}
/** Returns the trusted certificates for this host if any. */
public List<TrustStoreItem> trustedCertificates() {
return trustStoreItems;
}
/**
 * Returns a copy of this with wantToFail set as given; when set, a wantToFail
 * event by the given agent is also recorded in the history.
 */
public Node withWantToFail(boolean wantToFail, Agent agent, Instant at) {
    Node updated = with(status.withWantToFail(wantToFail));
    return wantToFail
           ? updated.with(history.with(new History.Event(History.Event.Type.wantToFail, agent, at)))
           : updated;
}
/**
* Returns a copy of this node with wantToRetire and wantToDeprovision set to the given values and updated history.
*
* If both given wantToRetire and wantToDeprovision are equal to the current values, the method is no-op.
*/
public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, Agent agent, Instant at) {
return withWantToRetire(wantToRetire, wantToDeprovision, false, agent, at);
}
/**
* Returns a copy of this node with wantToRetire, wantToDeprovision and wantToRebuild set to the given values
* and updated history.
*
* If all given values are equal to the current ones, the method is no-op.
*/
public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild, Agent agent, Instant at) {
if (wantToRetire == status.wantToRetire() &&
wantToDeprovision == status.wantToDeprovision() &&
wantToRebuild == status.wantToRebuild()) return this;
Node node = this.with(status.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild));
if (wantToRetire)
node = node.with(history.with(new History.Event(History.Event.Type.wantToRetire, agent, at)));
return node;
}
public Node withWantToRetire(boolean wantToRetire, Agent agent, Instant at) {
return withWantToRetire(wantToRetire, status.wantToDeprovision(), agent, at);
}
/** Returns a copy of this node with preferToRetire set to given value and updated history */
public Node withPreferToRetire(boolean preferToRetire, Agent agent, Instant at) {
    if (status.preferToRetire() == preferToRetire) return this; // no-op when the flag is unchanged
    Node updated = with(status.withPreferToRetire(preferToRetire));
    if ( ! preferToRetire) return updated;
    // Only setting the flag is history-worthy; clearing it leaves the history as-is
    return updated.with(history.with(new History.Event(History.Event.Type.preferToRetire, agent, at)));
}
/**
* Returns a copy of this node which is retired.
* If the node was already retired it is returned as-is.
*/
public Node retire(Agent agent, Instant retiredAt) {
Allocation allocation = requireAllocation("Cannot retire");
if (allocation.membership().retired()) return this;
return with(allocation.retire())
.with(history.with(new History.Event(History.Event.Type.retired, agent, retiredAt)));
}
/** Returns a copy of this node which is retired, attributing the retirement to the responsible agent */
public Node retire(Instant retiredAt) {
    Agent agent = (status.wantToRetire() || status.preferToRetire()) ? Agent.system : Agent.application;
    return retire(agent, retiredAt);
}
/** Returns a copy of this node which is not retired */
public Node unretire() {
return with(requireAllocation("Cannot unretire").unretire());
}
/** Returns a copy of this with removable set to the given value */
public Node removable(boolean removable) {
return with(requireAllocation("Cannot set removable").removable(removable));
}
/** Returns a copy of this with the restart generation set to generation */
public Node withRestart(Generation generation) {
Allocation allocation = requireAllocation("Cannot set restart generation");
return with(allocation.withRestart(generation));
}
/** Returns a node with the status assigned to the given value */
public Node with(Status status) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a node with the type assigned to the given value */
public Node with(NodeType type) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a node with the flavor assigned to the given value */
public Node with(Flavor flavor, Agent agent, Instant instant) {
if (flavor.equals(this.flavor)) return this;
History updateHistory = history.with(new History.Event(History.Event.Type.resized, agent, instant));
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, updateHistory, type,
reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with the reboot generation set to generation */
public Node withReboot(Generation generation) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status.withReboot(generation), state,
allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with given id set */
public Node withId(String id) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with model name set to given value */
public Node withModelName(String modelName) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, Optional.of(modelName), reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with model name cleared */
public Node withoutModelName() {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, Optional.empty(), reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with a history record saying it was detected to be down at this instant */
public Node downAt(Instant instant, Agent agent) {
return with(history.with(new History.Event(History.Event.Type.down, agent, instant)));
}
/** Returns a copy of this with any history record saying it has been detected down removed */
public Node upAt(Instant instant, Agent agent) {
return with(history.with(new History.Event(History.Event.Type.up, agent, instant)));
}
/** Returns whether this node is down, according to its recorded 'down' and 'up' events */
/** Returns a copy of this with allocation set as specified. <code>node.state</code> is *not* changed. */
public Node allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) {
return this
.with(new Allocation(owner, membership, requestedResources, new Generation(0, 0), false))
.with(history.with(new History.Event(History.Event.Type.reserved, Agent.application, at)));
}
/**
 * Returns a copy of this node with the allocation assigned to the given allocation.
 * Do not use this to allocate a node.
 */
public Node with(Allocation allocation) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            Optional.of(allocation), history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a new Node without an allocation. */
public Node withoutAllocation() {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            Optional.empty(), history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with IP config set to the given value. */
public Node with(IP.Config ipConfig) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with the parent hostname assigned to the given value. */
public Node withParentHostname(String parentHostname) {
    return new Node(id, ipConfig, hostname, Optional.of(parentHostname), flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node reserved to the given tenant. Only nodes of type host can be reserved. */
public Node withReservedTo(TenantName tenant) {
    if (type != NodeType.host)
        throw new IllegalArgumentException("Only host nodes can be reserved, " + hostname + " has type " + type);
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, Optional.of(tenant), exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node which is not reserved to a tenant */
public Node withoutReservedTo() {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, Optional.empty(), exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node exclusive to the given application; passing null clears the exclusivity */
public Node withExclusiveToApplicationId(ApplicationId exclusiveTo) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, Optional.ofNullable(exclusiveTo), exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node exclusive to the given cluster type; passing null clears the exclusivity */
public Node withExclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, Optional.ofNullable(exclusiveTo), switchHostname, trustStoreItems);
}
/** Returns a copy of this node with switch hostname set to given value */
public Node withSwitchHostname(String switchHostname) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, Optional.ofNullable(switchHostname), trustStoreItems);
}
/** Returns a copy of this node with switch hostname unset */
public Node withoutSwitchHostname() {
    return withSwitchHostname(null);
}
/** Returns a copy of this node with the current reboot generation set to the given number at the given instant */
public Node withCurrentRebootGeneration(long generation, Instant instant) {
    // Reboot generation is monotonically increasing; going backwards indicates a caller bug
    if (generation < status.reboot().current())
        throw new IllegalArgumentException("Cannot set reboot generation to " + generation +
                                           ": lower than current generation: " + status.reboot().current());
    Status newStatus = status().withReboot(status().reboot().withCurrent(generation));
    History newHistory = history.with(new History.Event(History.Event.Type.rebooted, Agent.system, instant));
    return this.with(newStatus).with(newHistory);
}
/**
 * Returns a copy of this node with the current OS version set to the given version at the given instant.
 * No-op if the recorded current OS version already equals the given version. An 'osUpgraded' history
 * event is recorded only when upgrading from a known (present) version, not on the first version report.
 */
public Node withCurrentOsVersion(Version version, Instant instant) {
    Optional<Version> newVersion = Optional.of(version);
    if (status.osVersion().current().equals(newVersion)) return this; // Unchanged: avoid churn
    History newHistory = history();
    // We only get here when the version differs, so a present current version implies an upgrade.
    // (The previous additional !equals(newVersion) check was redundant after the early return above.)
    if (status.osVersion().current().isPresent()) {
        newHistory = history.with(new History.Event(History.Event.Type.osUpgraded, Agent.system, instant));
    }
    Status newStatus = status.withOsVersion(status.osVersion().withCurrent(newVersion));
    return this.with(newStatus).with(newHistory);
}
/** Returns a copy of this node with firmware verified at the given instant */
public Node withFirmwareVerifiedAt(Instant instant) {
    var newStatus = status.withFirmwareVerifiedAt(instant);
    var newHistory = history.with(new History.Event(History.Event.Type.firmwareVerified, Agent.system, instant));
    return this.with(newStatus).with(newHistory);
}
/** Returns a copy of this node with the given history. */
public Node with(History history) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with the given reports */
public Node with(Reports reports) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with the given trust store items */
public Node with(List<TrustStoreItem> trustStoreItems) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname,
            trustStoreItems);
}
/** Validates that an optional string, when present, contains at least one non-whitespace character. */
private static Optional<String> requireNonEmptyString(Optional<String> value, String message) {
    Objects.requireNonNull(value, message);
    if (value.isPresent())
        requireNonEmptyString(value.get(), message);
    return value;
}

/** Validates that a string is non-null and not blank, returning it for call chaining. */
private static String requireNonEmptyString(String value, String message) {
    Objects.requireNonNull(value, message);
    if (!value.trim().isEmpty()) return value;
    throw new IllegalArgumentException(message + ", but was '" + value + "'");
}

/** Validates that a set is non-null and non-empty, returning it for call chaining. */
private static Set<String> requireNonEmpty(Set<String> values, String message) {
    if (values != null && !values.isEmpty()) return values;
    throw new IllegalArgumentException(message);
}
/** Computes the allocation skew of a host node */
public static double skew(NodeResources totalHostCapacity, NodeResources freeHostCapacity) {
    // Skew is the mean squared deviation of the allocated fraction of each resource dimension
    NodeResources all = totalHostCapacity.justNumbers();
    NodeResources allocated = all.subtract(freeHostCapacity.justNumbers());
    return new Mean(allocated.vcpu() / all.vcpu(),
                    allocated.memoryGb() / all.memoryGb(),
                    allocated.diskGb() / all.diskGb())
                   .deviation();
}
/** Returns the ACL for the node (trusted nodes, networks and ports) */
public NodeAcl acl(NodeList allNodes, LoadBalancers loadBalancers) {
    return NodeAcl.from(this, allNodes, loadBalancers);
}
/** Two nodes are equal iff they have the same hostname and the exact same class. */
@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if (other == null || getClass() != other.getClass()) return false;
    return hostname.equals(((Node) other).hostname);
}

@Override
public int hashCode() {
    return Objects.hash(hostname);
}

/** Human-readable summary: state, host/child marker, hostname, and the allocation when present. */
@Override
public String toString() {
    StringBuilder description = new StringBuilder();
    description.append(state);
    description.append(parentHostname.isPresent() ? " child node " : " host ");
    description.append(hostname);
    allocation.ifPresent(a -> description.append(" ").append(a));
    return description.toString();
}
public enum State {

    /** This node has been requested, but is not yet ready for use */
    provisioned,

    /** This node is free and ready for use */
    ready,

    /** This node has been reserved by an application but is not yet used by it */
    reserved,

    /** This node is in active use by an application */
    active,

    /** This node has been used by an application, is still allocated to it and retains the data needed for its allocated role */
    inactive,

    /** This node is not allocated to an application but may contain data which must be cleaned before it is ready */
    dirty,

    /** This node has failed and must be repaired or removed. The node retains any allocation data for diagnosis. */
    failed,

    /**
     * This node should not currently be used.
     * This state follows the same rules as failed except that it will never be automatically moved out of
     * this state.
     */
    parked,

    /** This host has previously been in use but is now removed. */
    deprovisioned,

    /** This host is currently undergoing repair. */
    breakfixed;

    /** Returns whether this is a state where the node is assigned to an application */
    public boolean isAllocated() {
        return allocatedStates().contains(this);
    }

    /** Returns the states in which a node has an allocation. A fresh, modifiable set is returned on each call. */
    public static Set<State> allocatedStates() {
        return EnumSet.of(reserved, active, inactive, dirty, failed, parked);
    }

}
/** Holds the mean and the mean squared deviation of a set of numbers */
private static class Mean {

    private final double mean;
    private final double deviation;

    private Mean(double ... values) {
        // Stream sum is kept as-is: summation order/compensation affects the exact floating-point result
        this.mean = Arrays.stream(values).sum() / values.length;
        this.deviation = Arrays.stream(values).map(x -> Math.pow(mean - x, 2)).sum() / values.length;
    }

    public double deviation() { return deviation; }

}
/** Builder for Node: required identity fields are set at construction, everything else is optional. */
public static class Builder {

    // Required, immutable identity of the node being built
    private final String id;
    private final String hostname;
    private final Flavor flavor;
    private final State state;
    private final NodeType type;

    // Optional properties; build() substitutes defaults for those left null
    private String parentHostname;
    private String modelName;
    private TenantName reservedTo;
    private ApplicationId exclusiveToApplicationId;
    private ClusterSpec.Type exclusiveToClusterType;
    private String switchHostname;
    private Allocation allocation;
    private IP.Config ipConfig;
    private Status status;
    private Reports reports;
    private History history;
    private List<TrustStoreItem> trustStoreItems;

    private Builder(String id, String hostname, Flavor flavor, State state, NodeType type) {
        this.id = id;
        this.hostname = hostname;
        this.flavor = flavor;
        this.state = state;
        this.type = type;
    }

    public Builder parentHostname(String parentHostname) {
        this.parentHostname = parentHostname;
        return this;
    }

    public Builder modelName(String modelName) {
        this.modelName = modelName;
        return this;
    }

    public Builder reservedTo(TenantName reservedTo) {
        this.reservedTo = reservedTo;
        return this;
    }

    public Builder exclusiveToApplicationId(ApplicationId exclusiveTo) {
        this.exclusiveToApplicationId = exclusiveTo;
        return this;
    }

    public Builder exclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
        this.exclusiveToClusterType = exclusiveTo;
        return this;
    }

    public Builder switchHostname(String switchHostname) {
        this.switchHostname = switchHostname;
        return this;
    }

    public Builder allocation(Allocation allocation) {
        this.allocation = allocation;
        return this;
    }

    public Builder ipConfig(IP.Config ipConfig) {
        this.ipConfig = ipConfig;
        return this;
    }

    /** Sets an IP config with the given primary addresses and an empty address pool */
    public Builder ipConfigWithEmptyPool(Set<String> primary) {
        this.ipConfig = IP.Config.ofEmptyPool(primary);
        return this;
    }

    public Builder status(Status status) {
        this.status = status;
        return this;
    }

    public Builder reports(Reports reports) {
        this.reports = reports;
        return this;
    }

    public Builder history(History history) {
        this.history = history;
        return this;
    }

    public Builder trustedCertificates(List<TrustStoreItem> trustStoreItems) {
        this.trustStoreItems = trustStoreItems;
        return this;
    }

    /** Builds the node, substituting empty/initial defaults for any optional property left unset */
    public Node build() {
        return new Node(id, Optional.ofNullable(ipConfig).orElse(IP.Config.EMPTY), hostname, Optional.ofNullable(parentHostname),
                flavor, Optional.ofNullable(status).orElseGet(Status::initial), state, Optional.ofNullable(allocation),
                Optional.ofNullable(history).orElseGet(History::empty), type, Optional.ofNullable(reports).orElseGet(Reports::new),
                Optional.ofNullable(modelName), Optional.ofNullable(reservedTo), Optional.ofNullable(exclusiveToApplicationId),
                Optional.ofNullable(exclusiveToClusterType), Optional.ofNullable(switchHostname),
                Optional.ofNullable(trustStoreItems).orElseGet(List::of));
    }

}
} | class Node implements Nodelike {
// Immutable identity and configuration of this node
private final String hostname;
private final IP.Config ipConfig;
private final String id;
private final Optional<String> parentHostname;
private final Flavor flavor;
private final Status status;
private final State state;
private final NodeType type;
private final Reports reports;
private final Optional<String> modelName;
private final Optional<TenantName> reservedTo;
private final Optional<ApplicationId> exclusiveToApplicationId;
private final Optional<ClusterSpec.Type> exclusiveToClusterType;
private final Optional<String> switchHostname;
private final List<TrustStoreItem> trustStoreItems;
/** Record of the last event of each type happening to this node */
private final History history;
/** The current allocation of this node, if any */
private final Optional<Allocation> allocation;
/** Creates a node builder in the initial state (reserved) */
public static Node.Builder reserve(Set<String> ipAddresses, String hostname, String parentHostname, NodeResources resources, NodeType type) {
    // A random UUID is used as the id since reserved nodes have no cloud-assigned instance id yet
    return new Node.Builder(UUID.randomUUID().toString(), hostname, new Flavor(resources), State.reserved, type)
            .ipConfig(IP.Config.ofEmptyPool(ipAddresses))
            .parentHostname(parentHostname);
}
/** Creates a node builder in the initial state (provisioned) */
public static Node.Builder create(String id, IP.Config ipConfig, String hostname, Flavor flavor, NodeType type) {
    return new Node.Builder(id, hostname, flavor, State.provisioned, type).ipConfig(ipConfig);
}
/** Creates a node builder */
public static Node.Builder create(String id, String hostname, Flavor flavor, Node.State state, NodeType type) {
    return new Node.Builder(id, hostname, flavor, state, type);
}
/** DO NOT USE: public for serialization purposes. See {@code create} helper methods. */
public Node(String id, IP.Config ipConfig, String hostname, Optional<String> parentHostname,
            Flavor flavor, Status status, State state, Optional<Allocation> allocation, History history, NodeType type,
            Reports reports, Optional<String> modelName, Optional<TenantName> reservedTo,
            Optional<ApplicationId> exclusiveToApplicationId, Optional<ClusterSpec.Type> exclusiveToClusterType,
            Optional<String> switchHostname, List<TrustStoreItem> trustStoreItems) {
    // Null/blank validation of every field
    this.id = Objects.requireNonNull(id, "A node must have an ID");
    this.hostname = requireNonEmptyString(hostname, "A node must have a hostname");
    this.ipConfig = Objects.requireNonNull(ipConfig, "A node must a have an IP config");
    this.parentHostname = requireNonEmptyString(parentHostname, "A parent host name must be a proper value");
    this.flavor = Objects.requireNonNull(flavor, "A node must have a flavor");
    this.status = Objects.requireNonNull(status, "A node must have a status");
    this.state = Objects.requireNonNull(state, "A null node state is not permitted");
    this.allocation = Objects.requireNonNull(allocation, "A null node allocation is not permitted");
    this.history = Objects.requireNonNull(history, "A null node history is not permitted");
    this.type = Objects.requireNonNull(type, "A null node type is not permitted");
    this.reports = Objects.requireNonNull(reports, "A null reports is not permitted");
    this.modelName = Objects.requireNonNull(modelName, "A null modelName is not permitted");
    this.reservedTo = Objects.requireNonNull(reservedTo, "reservedTo cannot be null");
    this.exclusiveToApplicationId = Objects.requireNonNull(exclusiveToApplicationId, "exclusiveToApplicationId cannot be null");
    this.exclusiveToClusterType = Objects.requireNonNull(exclusiveToClusterType, "exclusiveToClusterType cannot be null");
    this.switchHostname = requireNonEmptyString(switchHostname, "switchHostname cannot be null");
    // Defensive, de-duplicated, immutable copy of the trust store items
    this.trustStoreItems = trustStoreItems.stream().distinct().collect(Collectors.toUnmodifiableList());
    // State-dependent IP invariants
    if (state == State.active)
        requireNonEmpty(ipConfig.primary(), "Active node " + hostname + " must have at least one valid IP address");
    if (state == State.ready && type.isHost()) {
        requireNonEmpty(ipConfig.primary(), "A " + type + " must have at least one primary IP address in state " + state);
        requireNonEmpty(ipConfig.pool().ipSet(), "A " + type + " must have a non-empty IP address pool in state " + state);
    }
    // Properties which only make sense on hosts are rejected on child nodes
    if (parentHostname.isPresent()) {
        if (!ipConfig.pool().ipSet().isEmpty()) throw new IllegalArgumentException("A child node cannot have an IP address pool");
        if (modelName.isPresent()) throw new IllegalArgumentException("A child node cannot have model name set");
        if (switchHostname.isPresent()) throw new IllegalArgumentException("A child node cannot have switch hostname set");
    }
    // Reservation/exclusivity only applies to tenant hosts
    if (type != NodeType.host && reservedTo.isPresent())
        throw new IllegalArgumentException("Only tenant hosts can be reserved to a tenant");
    if (type != NodeType.host && exclusiveToApplicationId.isPresent())
        throw new IllegalArgumentException("Only tenant hosts can be exclusive to an application");
    if (type != NodeType.host && exclusiveToClusterType.isPresent())
        throw new IllegalArgumentException("Only tenant hosts can be exclusive to a cluster type");
}
/** Returns the IP config of this node */
public IP.Config ipConfig() { return ipConfig; }
/** Returns the host name of this node */
public String hostname() { return hostname; }
/**
 * Unique identifier for this node. Code should not depend on this as its main purpose is to aid human operators in
 * mapping a node to the corresponding cloud instance. No particular format is enforced.
 *
 * Formats used vary between the underlying cloud providers:
 *
 * - OpenStack: UUID
 * - AWS: Instance ID
 * - Linux containers: UUID
 */
public String id() { return id; }
@Override
public Optional<String> parentHostname() { return parentHostname; }
/** Returns whether this node is a child of the host with the given hostname */
public boolean hasParent(String hostname) {
    return parentHostname.isPresent() && parentHostname.get().equals(hostname);
}
@Override
public NodeResources resources() { return flavor.resources(); }
/** Returns the flavor of this node */
public Flavor flavor() { return flavor; }
/** Returns the known information about the node's ephemeral status */
public Status status() { return status; }
/** Returns the current state of this node (in the node state machine) */
public State state() { return state; }
@Override
public NodeType type() { return type; }
/** Returns the current allocation of this, if any */
public Optional<Allocation> allocation() { return allocation; }
/** Returns the current allocation, or throws IllegalStateException if there is no allocation. */
private Allocation requireAllocation(String message) {
    final Optional<Allocation> allocation = this.allocation;
    if ( ! allocation.isPresent())
        throw new IllegalStateException(message + " for " + hostname() + ": The node is unallocated");
    return allocation.get();
}
/** Returns a history of the last events happening to this node */
public History history() { return history; }
/** Returns all the reports on this node. */
public Reports reports() { return reports; }
/** Returns the hardware model of this node, if any */
public Optional<String> modelName() { return modelName; }
/**
 * Returns the tenant this node is reserved to, if any. Only hosts can be reserved to a tenant.
 * If this is set, resources on this host cannot be allocated to any other tenant
 */
public Optional<TenantName> reservedTo() { return reservedTo; }
/**
 * Returns the application this host is exclusive to, if any. Only tenant hosts can be exclusive to an application.
 * If this is set, resources on this host cannot be allocated to any other application. This is set during
 * provisioning and applies for the entire lifetime of the host
 */
public Optional<ApplicationId> exclusiveToApplicationId() { return exclusiveToApplicationId; }
/**
 * Returns the cluster type this host is exclusive to, if any. Only tenant hosts can be exclusive to a cluster type.
 * If this is set, resources on this host cannot be allocated to any other cluster type. This is set during
 * provisioning and applies for the entire lifetime of the host
 */
public Optional<ClusterSpec.Type> exclusiveToClusterType() { return exclusiveToClusterType; }
/** Returns the hostname of the switch this node is connected to, if any */
public Optional<String> switchHostname() {
    return switchHostname;
}
/** Returns the trusted certificates for this host if any. */
public List<TrustStoreItem> trustedCertificates() {
    return trustStoreItems;
}
/**
 * Returns a copy of this where wantToFail is set to true and history is updated to reflect this.
 */
public Node withWantToFail(boolean wantToFail, Agent agent, Instant at) {
    Node node = this.with(status.withWantToFail(wantToFail));
    // Only record a history event when the flag is being set, not when cleared
    if (wantToFail)
        node = node.with(history.with(new History.Event(History.Event.Type.wantToFail, agent, at)));
    return node;
}
/**
 * Returns a copy of this node with wantToRetire and wantToDeprovision set to the given values and updated history.
 *
 * If both given wantToRetire and wantToDeprovision are equal to the current values, the method is no-op.
 */
public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, Agent agent, Instant at) {
    return withWantToRetire(wantToRetire, wantToDeprovision, false, agent, at);
}
/**
 * Returns a copy of this node with wantToRetire, wantToDeprovision and wantToRebuild set to the given values
 * and updated history.
 *
 * If all given values are equal to the current ones, the method is no-op.
 */
public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild, Agent agent, Instant at) {
    if (wantToRetire == status.wantToRetire() &&
        wantToDeprovision == status.wantToDeprovision() &&
        wantToRebuild == status.wantToRebuild()) return this;
    Node node = this.with(status.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild));
    // Only record a history event when retirement is being requested
    if (wantToRetire)
        node = node.with(history.with(new History.Event(History.Event.Type.wantToRetire, agent, at)));
    return node;
}
/** Returns a copy of this with wantToRetire set and the current wantToDeprovision value preserved */
public Node withWantToRetire(boolean wantToRetire, Agent agent, Instant at) {
    return withWantToRetire(wantToRetire, status.wantToDeprovision(), agent, at);
}
/** Returns a copy of this node with preferToRetire set to given value and updated history */
public Node withPreferToRetire(boolean preferToRetire, Agent agent, Instant at) {
    if (preferToRetire == status.preferToRetire()) return this;
    Node node = this.with(status.withPreferToRetire(preferToRetire));
    if (preferToRetire) {
        node = node.with(history.with(new History.Event(History.Event.Type.preferToRetire, agent, at)));
    }
    return node;
}
/**
 * Returns a copy of this node which is retired.
 * If the node was already retired it is returned as-is.
 */
public Node retire(Agent agent, Instant retiredAt) {
    Allocation allocation = requireAllocation("Cannot retire");
    if (allocation.membership().retired()) return this;
    return with(allocation.retire())
            .with(history.with(new History.Event(History.Event.Type.retired, agent, retiredAt)));
}
/** Returns a copy of this node which is retired, attributing the event to system or application */
public Node retire(Instant retiredAt) {
    if (status.wantToRetire() || status.preferToRetire())
        return retire(Agent.system, retiredAt);
    else
        return retire(Agent.application, retiredAt);
}
/** Returns a copy of this node which is not retired */
public Node unretire() {
    return with(requireAllocation("Cannot unretire").unretire());
}
/** Returns a copy of this with removable set to the given value */
public Node removable(boolean removable) {
    return with(requireAllocation("Cannot set removable").removable(removable));
}
/** Returns a copy of this with the restart generation set to generation */
public Node withRestart(Generation generation) {
    Allocation allocation = requireAllocation("Cannot set restart generation");
    return with(allocation.withRestart(generation));
}
/** Returns a node with the status assigned to the given value */
public Node with(Status status) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
            reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a node with the type assigned to the given value */
public Node with(NodeType type) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
            reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a node with the flavor assigned to the given value; records a 'resized' event when the flavor changes */
public Node with(Flavor flavor, Agent agent, Instant instant) {
    if (flavor.equals(this.flavor)) return this;
    History updateHistory = history.with(new History.Event(History.Event.Type.resized, agent, instant));
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, updateHistory, type,
            reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with the reboot generation set to generation */
public Node withReboot(Generation generation) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status.withReboot(generation), state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with given id set */
public Node withId(String id) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with model name set to given value */
public Node withModelName(String modelName) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, Optional.of(modelName), reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with model name cleared */
public Node withoutModelName() {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, Optional.empty(), reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this with a history record saying it was detected to be down at this instant */
public Node downAt(Instant instant, Agent agent) {
    return with(history.with(new History.Event(History.Event.Type.down, agent, instant)));
}
/** Returns a copy of this with any history record saying it has been detected down removed */
public Node upAt(Instant instant, Agent agent) {
    return with(history.with(new History.Event(History.Event.Type.up, agent, instant)));
}
/** Returns a copy of this with allocation set as specified. <code>node.state</code> is *not* changed. */
public Node allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) {
    return this
        .with(new Allocation(owner, membership, requestedResources, new Generation(0, 0), false))
        .with(history.with(new History.Event(History.Event.Type.reserved, Agent.application, at)));
}
/**
 * Returns a copy of this node with the allocation assigned to the given allocation.
 * Do not use this to allocate a node.
 */
public Node with(Allocation allocation) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            Optional.of(allocation), history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a new Node without an allocation. */
public Node withoutAllocation() {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            Optional.empty(), history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with IP config set to the given value. */
public Node with(IP.Config ipConfig) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with the parent hostname assigned to the given value. */
public Node withParentHostname(String parentHostname) {
    return new Node(id, ipConfig, hostname, Optional.of(parentHostname), flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node reserved to the given tenant. Only nodes of type host can be reserved. */
public Node withReservedTo(TenantName tenant) {
    if (type != NodeType.host)
        throw new IllegalArgumentException("Only host nodes can be reserved, " + hostname + " has type " + type);
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, Optional.of(tenant), exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node which is not reserved to a tenant */
public Node withoutReservedTo() {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, Optional.empty(), exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node exclusive to the given application; passing null clears the exclusivity */
public Node withExclusiveToApplicationId(ApplicationId exclusiveTo) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, Optional.ofNullable(exclusiveTo), exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node exclusive to the given cluster type; passing null clears the exclusivity */
public Node withExclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, Optional.ofNullable(exclusiveTo), switchHostname, trustStoreItems);
}
/** Returns a copy of this node with switch hostname set to given value */
public Node withSwitchHostname(String switchHostname) {
    return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
            allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, Optional.ofNullable(switchHostname), trustStoreItems);
}
/** Returns a copy of this node with switch hostname unset */
public Node withoutSwitchHostname() {
    return withSwitchHostname(null);
}
/** Returns a copy of this node with the current reboot generation set to the given number at the given instant */
public Node withCurrentRebootGeneration(long generation, Instant instant) {
    // Reboot generation is monotonically increasing; going backwards indicates a caller bug
    if (generation < status.reboot().current())
        throw new IllegalArgumentException("Cannot set reboot generation to " + generation +
                                           ": lower than current generation: " + status.reboot().current());
    Status newStatus = status().withReboot(status().reboot().withCurrent(generation));
    History newHistory = history.with(new History.Event(History.Event.Type.rebooted, Agent.system, instant));
    return this.with(newStatus).with(newHistory);
}
/**
 * Returns a copy of this node with the current OS version set to the given version at the given instant.
 * No-op if the recorded current OS version already equals the given version. An 'osUpgraded' history
 * event is recorded only when upgrading from a known (present) version, not on the first version report.
 */
public Node withCurrentOsVersion(Version version, Instant instant) {
    Optional<Version> newVersion = Optional.of(version);
    if (status.osVersion().current().equals(newVersion)) return this; // Unchanged: avoid churn
    History newHistory = history();
    // We only get here when the version differs, so a present current version implies an upgrade.
    // (The previous additional !equals(newVersion) check was redundant after the early return above.)
    if (status.osVersion().current().isPresent()) {
        newHistory = history.with(new History.Event(History.Event.Type.osUpgraded, Agent.system, instant));
    }
    Status newStatus = status.withOsVersion(status.osVersion().withCurrent(newVersion));
    return this.with(newStatus).with(newHistory);
}
/** Returns a copy of this node with firmware verified at the given instant */
public Node withFirmwareVerifiedAt(Instant instant) {
var newStatus = status.withFirmwareVerifiedAt(instant);
var newHistory = history.with(new History.Event(History.Event.Type.firmwareVerified, Agent.system, instant));
return this.with(newStatus).with(newHistory);
}
/** Returns a copy of this node with the given history. */
public Node with(History history) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
public Node with(Reports reports) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems);
}
/** Returns a copy of this node with the given trust store items; all other fields are unchanged. */
public Node with(List<TrustStoreItem> trustStoreItems) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname,
trustStoreItems);
}
/**
 * Validates that the optional itself is non-null and that a present value is non-blank,
 * by delegating to the String overload. Returns the optional unchanged.
 */
private static Optional<String> requireNonEmptyString(Optional<String> value, String message) {
Objects.requireNonNull(value, message);
value.ifPresent(v -> requireNonEmptyString(v, message));
return value;
}
/** Returns the given string, or throws IllegalArgumentException if it is null, empty or whitespace only. */
private static String requireNonEmptyString(String value, String message) {
    Objects.requireNonNull(value, message);
    boolean blank = value.trim().isEmpty();
    if (blank) {
        throw new IllegalArgumentException(message + ", but was '" + value + "'");
    }
    return value;
}
/** Returns the given set, or throws IllegalArgumentException if it is null or contains no elements. */
private static Set<String> requireNonEmpty(Set<String> values, String message) {
    boolean missing = (values == null) || values.isEmpty();
    if ( ! missing) return values;
    throw new IllegalArgumentException(message);
}
/** Computes the allocation skew of a host node */
// Skew is the population variance of the allocated fractions of cpu, memory and disk:
// 0 when resources are allocated in equal proportions, larger the more unbalanced the allocation.
public static double skew(NodeResources totalHostCapacity, NodeResources freeHostCapacity) {
NodeResources all = totalHostCapacity.justNumbers();
NodeResources allocated = all.subtract(freeHostCapacity.justNumbers());
return new Mean(allocated.vcpu() / all.vcpu(),
allocated.memoryGb() / all.memoryGb(),
allocated.diskGb() / all.diskGb())
.deviation();
}
/** Returns the ACL for the node (trusted nodes, networks and ports) */
// Pure delegation: the ACL is derived from this node plus the full node list and load balancers.
public NodeAcl acl(NodeList allNodes, LoadBalancers loadBalancers) {
return NodeAcl.from(this, allNodes, loadBalancers);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Node node = (Node) o;
// Node identity is the hostname alone; no other field participates in equality.
return hostname.equals(node.hostname);
}
@Override
public int hashCode() {
// Consistent with equals: hash on hostname only.
return Objects.hash(hostname);
}
/** Human-readable summary: state, host/child-node marker, hostname, and the allocation when present. */
@Override
public String toString() {
    StringBuilder description = new StringBuilder();
    description.append(state);
    description.append(parentHostname.isPresent() ? " child node " : " host ");
    description.append(hostname);
    allocation.ifPresent(a -> description.append(" ").append(a));
    return description.toString();
}
/** The lifecycle states a node can be in. */
public enum State {
/** This node has been requested, but is not yet ready for use */
provisioned,
/** This node is free and ready for use */
ready,
/** This node has been reserved by an application but is not yet used by it */
reserved,
/** This node is in active use by an application */
active,
/** This node has been used by an application, is still allocated to it and retains the data needed for its allocated role */
inactive,
/** This node is not allocated to an application but may contain data which must be cleaned before it is ready */
dirty,
/** This node has failed and must be repaired or removed. The node retains any allocation data for diagnosis. */
failed,
/**
* This node should not currently be used.
* This state follows the same rules as failed except that it will never be automatically moved out of
* this state.
*/
parked,
/** This host has previously been in use but is now removed. */
deprovisioned,
/** This host is currently undergoing repair. */
breakfixed;
/** Returns whether this is a state where the node is assigned to an application */
public boolean isAllocated() {
return allocatedStates().contains(this);
}
// Returns a freshly created, mutable EnumSet on each call; callers may mutate their copy safely.
public static Set<State> allocatedStates() {
return EnumSet.of(reserved, active, inactive, dirty, failed, parked);
}
}
/** The mean and mean deviation (squared difference) of a bunch of numbers */
private static class Mean {
private final double mean;
private final double deviation;
private Mean(double ... numbers) {
// 'deviation' is the population variance: the average of squared distances from the mean.
// Kept on streams deliberately — DoubleStream.sum may use compensated summation, so a
// naive-loop rewrite could change results in the last ulps.
mean = Arrays.stream(numbers).sum() / numbers.length;
deviation = Arrays.stream(numbers).map(n -> Math.pow(mean - n, 2)).sum() / numbers.length;
}
public double deviation() { return deviation; }
}
/**
 * Builder for Node. The required identity fields (id, hostname, flavor, state, type) are fixed
 * at construction; all other fields are optional and defaulted in build().
 */
public static class Builder {
private final String id;
private final String hostname;
private final Flavor flavor;
private final State state;
private final NodeType type;
private String parentHostname;
private String modelName;
private TenantName reservedTo;
private ApplicationId exclusiveToApplicationId;
private ClusterSpec.Type exclusiveToClusterType;
private String switchHostname;
private Allocation allocation;
private IP.Config ipConfig;
private Status status;
private Reports reports;
private History history;
private List<TrustStoreItem> trustStoreItems;
private Builder(String id, String hostname, Flavor flavor, State state, NodeType type) {
this.id = id;
this.hostname = hostname;
this.flavor = flavor;
this.state = state;
this.type = type;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = parentHostname;
return this;
}
public Builder modelName(String modelName) {
this.modelName = modelName;
return this;
}
public Builder reservedTo(TenantName reservedTo) {
this.reservedTo = reservedTo;
return this;
}
public Builder exclusiveToApplicationId(ApplicationId exclusiveTo) {
this.exclusiveToApplicationId = exclusiveTo;
return this;
}
public Builder exclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
this.exclusiveToClusterType = exclusiveTo;
return this;
}
public Builder switchHostname(String switchHostname) {
this.switchHostname = switchHostname;
return this;
}
public Builder allocation(Allocation allocation) {
this.allocation = allocation;
return this;
}
public Builder ipConfig(IP.Config ipConfig) {
this.ipConfig = ipConfig;
return this;
}
public Builder ipConfigWithEmptyPool(Set<String> primary) {
this.ipConfig = IP.Config.ofEmptyPool(primary);
return this;
}
public Builder status(Status status) {
this.status = status;
return this;
}
public Builder reports(Reports reports) {
this.reports = reports;
return this;
}
public Builder history(History history) {
this.history = history;
return this;
}
public Builder trustedCertificates(List<TrustStoreItem> trustStoreItems) {
this.trustStoreItems = trustStoreItems;
return this;
}
// Unset optional fields are replaced with their empty/initial defaults here.
public Node build() {
return new Node(id, Optional.ofNullable(ipConfig).orElse(IP.Config.EMPTY), hostname, Optional.ofNullable(parentHostname),
flavor, Optional.ofNullable(status).orElseGet(Status::initial), state, Optional.ofNullable(allocation),
Optional.ofNullable(history).orElseGet(History::empty), type, Optional.ofNullable(reports).orElseGet(Reports::new),
Optional.ofNullable(modelName), Optional.ofNullable(reservedTo), Optional.ofNullable(exclusiveToApplicationId),
Optional.ofNullable(exclusiveToClusterType), Optional.ofNullable(switchHostname),
Optional.ofNullable(trustStoreItems).orElseGet(List::of));
}
}
} |
Are there any guarantees that this will trigger any timeouts at all? Looks like the PR build failed with all responses completing successfully. It looks like all messages are routed to an instance of the `Destination` test mock class, so could instead add functionality to it for deferring response sending. The test could then wait for a client-generated timeout response before allowing the server response to be sent. No need for a retry loop either, since it should be deterministic. | public void requireThatTimeoutWorks() throws InterruptedException {
// NOTE(review): this retry loop is not deterministic — a 40 ms deadline can still be met by the
// destination, in which case no TIMEOUT outcome is ever produced and the final assert fails.
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Response> response = new AtomicReference<>();
AsyncSession session = access().createAsyncSession(new AsyncParameters());
DocumentType type = access().getDocumentTypeManager().getDocumentType("music");
Document doc1 = new Document(type, new DocumentId("id:ns:music::1"));
int attempts = 10;
for (int i = 0; ++i <= attempts; ) {
assertTrue(session.put(new DocumentPut(doc1),
DocumentOperationParameters.parameters()
.withResponseHandler(result -> {
response.set(result);
latch.countDown();
})
.withDeadline(Instant.now().plusMillis(40)))
.isSuccess());
// NOTE(review): the latch is created once but reused across iterations, so after the first
// countDown it no longer blocks — later iterations may observe a stale response.
assertTrue(latch.await(60, TimeUnit.SECONDS));
if (response.get().outcome() == Outcome.TIMEOUT) break;
if (i == attempts) assertEquals(Response.Outcome.TIMEOUT, response.get().outcome());
}
session.destroy();
} | public void requireThatTimeoutWorks() throws InterruptedException {
// Deterministic timeout test: registering with the destination's phaser presumably defers the
// server reply until this thread arrives (confirm in Destination), so the 100 ms client
// deadline is guaranteed to expire first and produce a TIMEOUT outcome.
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Response> response = new AtomicReference<>();
AsyncSession session = access().createAsyncSession(new AsyncParameters());
DocumentType type = access().getDocumentTypeManager().getDocumentType("music");
Document doc1 = new Document(type, new DocumentId("id:ns:music::1"));
destination.phaser.register();
assertTrue(session.put(new DocumentPut(doc1),
DocumentOperationParameters.parameters()
.withResponseHandler(result -> {
response.set(result);
latch.countDown();
})
.withDeadline(Instant.now().plusMillis(100)))
.isSuccess());
assertTrue(latch.await(60, TimeUnit.SECONDS));
assertEquals(Response.Outcome.TIMEOUT, response.get().outcome());
// Let the destination proceed with (and discard) its now-obsolete reply.
destination.phaser.arriveAndDeregister();
session.destroy();
} | class VisitableDestination extends Destination {
private VisitableDestination(String slobrokConfigId, String documentManagerConfigId) {
super(slobrokConfigId, documentManagerConfigId);
}
// Short-circuits visitor creation: replies immediately that the visit reached the finished
// bucket, so visitor sessions complete without any real bucket iteration. All other messages
// are handled by the base Destination.
public void handleMessage(Message msg) {
if (msg.getType() == DocumentProtocol.MESSAGE_CREATEVISITOR) {
Reply reply = ((DocumentMessage)msg).createReply();
msg.swapState(reply);
CreateVisitorReply visitorReply = (CreateVisitorReply)reply;
visitorReply.setLastBucket(ProgressToken.FINISHED_BUCKET);
sendReply(reply);
} else {
super.handleMessage(msg);
}
}
} | class MessageBusDocumentApiTestCase extends AbstractDocumentApiTestCase {
// Test fixtures: an embedded slobrok naming service, a message-sink destination, and the
// document access under test; all created in setUp and torn down in tearDown.
private Slobrok slobrok;
private Destination destination;
private DocumentAccess access;
@Override
protected DocumentAccess access() {
return access;
}
@Before
public void setUp() throws ListenFailedException {
// Embedded slobrok on an ephemeral port; the raw config points message bus at it.
slobrok = new Slobrok();
String slobrokConfigId =
"raw:slobrok[1]\n" + "slobrok[0].connectionspec tcp/localhost:" + slobrok.port() + "\n";
MessageBusParams params = new MessageBusParams();
params.getRPCNetworkParams().setIdentity(new Identity("test/feeder"));
params.getRPCNetworkParams().setSlobrokConfigId(slobrokConfigId);
params.setDocumentManagerConfigId("file:src/test/cfg/documentmanager.cfg");
params.setRouteName("Route");
params.setRouteNameForGet("Route");
params.setRoutingConfigId("file:src/test/cfg/messagebus.cfg");
params.setTraceLevel(9);
access = new MessageBusDocumentAccess(params);
// The destination registers with the same slobrok so "Route" resolves to it.
destination = new Destination(slobrokConfigId, params.getDocumentManagerConfigId());
}
@After
public void tearDown() {
// Release the resources created in setUp: access, destination, then the slobrok itself.
access.shutdown();
destination.shutdown();
slobrok.stop();
}
@Test
public void requireThatVisitorSessionWorksWithMessageBus() throws ParseException, InterruptedException {
VisitorParameters parameters = new VisitorParameters("id.user==1234");
parameters.setRoute("Route");
VisitorSession session = ((MessageBusDocumentAccess)access).createVisitorSession(parameters);
// 5 minute ceiling; the session should complete far sooner.
boolean ok = session.waitUntilDone(60*5*1000);
assertTrue(ok);
session.destroy();
}
/** The default throttle policy must be dynamic, and shared between sessions created by the same access. */
@Test
// Fix: the annotation was duplicated (@Test @Test); JUnit 4's @Test is not @Repeatable,
// so the duplicate is a compile error.
public void requireThatDefaultThrottlePolicyIsDynamicAndShared() {
    MessageBusAsyncSession mbusSessionA = (MessageBusAsyncSession) access().createAsyncSession(new AsyncParameters());
    assertTrue(mbusSessionA.getThrottlePolicy() instanceof DynamicThrottlePolicy);
    MessageBusAsyncSession mbusSessionB = (MessageBusAsyncSession) access().createAsyncSession(new AsyncParameters());
    assertSame(mbusSessionA.getThrottlePolicy(), mbusSessionB.getThrottlePolicy());
    mbusSessionB.destroy();
    mbusSessionA.destroy();
}
@Test
public void requireThatThrottlePolicyCanBeConfigured() {
var asyncParams = new AsyncParameters();
ThrottlePolicy allPass = new AllPassThrottlePolicy();
asyncParams.setThrottlePolicy(allPass);
MessageBusAsyncSession mbusSession = (MessageBusAsyncSession) access().createAsyncSession(asyncParams);
// The exact instance passed in must be the one the session uses.
assertSame(allPass, mbusSession.getThrottlePolicy());
mbusSession.destroy();
}
} | |
Should we add a future proof `startsWith(ai.vespa.)` as well? Suggest to add a trailing dot to com.yahoo.vespa anyway. | private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
    vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
// Walk up to the topmost project that itself has a parent artifact: that is the effective parent.
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
    current = current.getParent();
boolean hasVespaParent = false;
Artifact parentArtifact = current.getParentArtifact();
// Fix: match Vespa group ids with a dot-terminated prefix so unrelated ids such as
// "com.yahoo.vespasomething" are not mistaken for Vespa, and also accept the "ai.vespa." namespace.
if (parentArtifact != null && (parentArtifact.getGroupId().startsWith("com.yahoo.vespa.") || parentArtifact.getGroupId().startsWith("ai.vespa."))) {
    hasVespaParent = true;
    Version parentVersion = Version.from(parentArtifact.getVersion());
    // The application must not be compiled against a newer Vespa than its parent pom declares.
    if (parentVersion.compareTo(compileVersion) < 0)
        throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n \"compileVersion\": \"%s\",\n \"buildTime\": %d,\n \"hasVespaParent\": %b\n}",
                                compileVersion,
                                System.currentTimeMillis(),
                                hasVespaParent);
try {
    Files.write(applicationDestination.toPath().resolve("build-meta.json"),
                metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
    throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
} | if (parentArtifact != null && parentArtifact.getGroupId().startsWith("com.yahoo.vespa")) { | private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
// Writes build-meta.json (compile version, build time, vespa-parent flag) into the destination,
// and rejects compiling against a Vespa newer than the effective parent pom's version.
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
// Walk up to the topmost project that itself has a parent artifact: the effective parent.
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
boolean hasVespaParent = false;
Artifact parentArtifact = current.getParentArtifact();
// Dot-terminated prefixes avoid matching unrelated group ids that merely start with the name.
if (parentArtifact != null && (parentArtifact.getGroupId().startsWith("com.yahoo.vespa.") || parentArtifact.getGroupId().startsWith("ai.vespa."))) {
hasVespaParent = true;
Version parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n \"compileVersion\": \"%s\",\n \"buildTime\": %d,\n \"hasVespaParent\": %b\n}",
compileVersion,
System.currentTimeMillis(),
hasVespaParent);
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
} | class ApplicationMojo extends AbstractMojo {
// Maven-injected build state and plugin configuration.
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
// Vespa version to compile against; when unset, this plugin's own version is used.
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
// Assembles the application package: copies sources, writes build metadata, gathers component
// bundles from this module and its sub-modules, then zips the result.
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Copies the bundles built by each declared sub-module into the components directory. */
// (The previous javadoc here — "Writes meta data about this package..." — described
// addBuildMetaData, not this method.)
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
/** Creates (if needed) and returns the "components" directory under the given destination. */
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
/** Recursively copies the application package sources to the destination; a missing source dir is a no-op. */
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
/** Copies deployable jars from the module's target directory into the components directory. */
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
// Only jars with these deployment suffixes are considered application bundles.
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
/** Copies a single file, overwriting (truncating) any existing destination file. */
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
/** Returns the given list, or an immutable empty list when it is null. */
private static <T> List<T> emptyListIfNull(List<T> modules) {
    if (modules == null) return Collections.emptyList();
    return modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
Condition not met isn't transient? | public static Error toError(ResultType result) {
switch (result) {
case TRANSIENT_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name());
case CONDITION_NOT_MET_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name());
case FATAL_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name());
}
return new Error(ErrorCode.NONE, "SUCCESS");
} | case CONDITION_NOT_MET_ERROR: | public static Error toError(ResultType result) {
switch (result) {
case TRANSIENT_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name());
case CONDITION_NOT_MET_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name());
case FATAL_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name());
}
return new Error(ErrorCode.NONE, "SUCCESS");
} | class Result {
/** Null if this is a success, set to the error occurring if this is a failure */
private final Error error;
/** The id of this operation */
private final long requestId;
private final ResultType type;
/** Creates a successful result with requestId zero */
public Result() {
this(0);
}
/**
* Creates a successful result
*
* @param requestId the ID of the request
*/
public Result(long requestId) {
this.error = null;
this.requestId = requestId;
type = ResultType.SUCCESS;
}
/**
* Creates a unsuccessful result
*
* @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error
*/
@Deprecated(forRemoval = true, since="7")
public Result(ResultType type, java.lang.Error error) {
this.type = type;
this.error = new Error(0, error.getMessage());
this.requestId = 0;
}
/**
* Creates a unsuccessful result
*
* @param type the type of failure
* @param error the error to encapsulate in this Result
* @see com.yahoo.documentapi.Result.ResultType
*/
public Result(ResultType type, Error error) {
this.type = type;
this.error = error;
this.requestId = 0;
}
/**
* Returns whether this operation is a success.
* If it is a success, the operation is accepted and one or more responses are guaranteed
* to arrive within this sessions timeout limit.
* If this is not a success, this operation has no further consequences.
*
* @return true if success
*/
public boolean isSuccess() { return type == ResultType.SUCCESS; }
/**
* Returns the error causes by this. If this was not a success, this method always returns an error
* If this was a success, this method returns null.
*
* @return the Error, or null
* @deprecated Will be removed on Vespa 8
*/
@Deprecated(forRemoval = true, since="7")
public java.lang.Error getError() { return new java.lang.Error(error.getMessage()); }
public Error error() { return error; }
/**
* Returns the id of this operation. The asynchronous response to this operation
* will contain the same id to allow clients who desire to, to match operations to responses.
*
* @return the id of this operation
*/
public long getRequestId() { return requestId; }
/**
* Returns the type of result.
*
* @return the type of result, typically if this is an error or a success, and what kind of error.
* @see com.yahoo.documentapi.Result.ResultType
*/
public ResultType type() { return type;}
/** The types that a Result can have. */
public enum ResultType {
/** The request was successful, no error information is attached. */
SUCCESS,
/** The request failed, but may be successful if retried at a later time. */
TRANSIENT_ERROR,
/** The request failed, and retrying is pointless. */
FATAL_ERROR,
/** Condition specified in operation not met error */
// NOTE(review): a failed condition is not retryable; deprecated in favor of reporting
// such failures as FATAL_ERROR — confirm against the toError mapping.
@Deprecated(since = "7", forRemoval = true)
CONDITION_NOT_MET_ERROR
}
} | class Result {
/** Null if this is a success, set to the error occurring if this is a failure */
private final Error error;
/** The id of this operation */
private final long requestId;
private final ResultType type;
/** Creates a successful result with requestId zero */
public Result() {
this(0);
}
/**
* Creates a successful result
*
* @param requestId the ID of the request
*/
public Result(long requestId) {
this.error = null;
this.requestId = requestId;
type = ResultType.SUCCESS;
}
/**
* Creates a unsuccessful result
*
* @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error
*/
@Deprecated(forRemoval = true, since="7")
public Result(ResultType type, java.lang.Error error) {
this.type = type;
this.error = new Error(0, error.getMessage());
this.requestId = 0;
}
/**
* Creates a unsuccessful result
*
* @param type the type of failure
* @param error the error to encapsulate in this Result
* @see com.yahoo.documentapi.Result.ResultType
*/
public Result(ResultType type, Error error) {
this.type = type;
this.error = error;
this.requestId = 0;
}
/**
* Returns whether this operation is a success.
* If it is a success, the operation is accepted and one or more responses are guaranteed
* to arrive within this sessions timeout limit.
* If this is not a success, this operation has no further consequences.
*
* @return true if success
*/
public boolean isSuccess() { return type == ResultType.SUCCESS; }
/**
* Returns the error causes by this. If this was not a success, this method always returns an error
* If this was a success, this method returns null.
*
* @return the Error, or null
* @deprecated Will be removed on Vespa 8
*/
@Deprecated(forRemoval = true, since="7")
public java.lang.Error getError() { return new java.lang.Error(error.getMessage()); }
public Error error() { return error; }
/**
* Returns the id of this operation. The asynchronous response to this operation
* will contain the same id to allow clients who desire to, to match operations to responses.
*
* @return the id of this operation
*/
public long getRequestId() { return requestId; }
/**
* Returns the type of result.
*
* @return the type of result, typically if this is an error or a success, and what kind of error.
* @see com.yahoo.documentapi.Result.ResultType
*/
public ResultType type() { return type;}
/** The types that a Result can have. */
public enum ResultType {
/** The request was successful, no error information is attached. */
SUCCESS,
/** The request failed, but may be successful if retried at a later time. */
TRANSIENT_ERROR,
/** The request failed, and retrying is pointless. */
FATAL_ERROR,
/** Condition specified in operation not met error */
@Deprecated(since = "7", forRemoval = true)
CONDITION_NOT_MET_ERROR
}
} |
No you will need to change your condition for it to have a bigger chance at success, and anyway it should not be here. See comment in Result.ResultType. | public static Error toError(ResultType result) {
switch (result) {
case TRANSIENT_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name());
case CONDITION_NOT_MET_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name());
case FATAL_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name());
}
return new Error(ErrorCode.NONE, "SUCCESS");
} | case CONDITION_NOT_MET_ERROR: | public static Error toError(ResultType result) {
switch (result) {
case TRANSIENT_ERROR:
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name());
case CONDITION_NOT_MET_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name());
case FATAL_ERROR:
return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name());
}
return new Error(ErrorCode.NONE, "SUCCESS");
} | class Result {
/** Null if this is a success, set to the error occurring if this is a failure */
private final Error error;
/** The id of this operation */
private final long requestId;
private final ResultType type;
/** Creates a successful result with requestId zero */
public Result() {
this(0);
}
/**
* Creates a successful result
*
* @param requestId the ID of the request
*/
public Result(long requestId) {
this.error = null;
this.requestId = requestId;
type = ResultType.SUCCESS;
}
/**
* Creates a unsuccessful result
*
* @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error
*/
@Deprecated(forRemoval = true, since="7")
public Result(ResultType type, java.lang.Error error) {
this.type = type;
this.error = new Error(0, error.getMessage());
this.requestId = 0;
}
/**
* Creates a unsuccessful result
*
* @param type the type of failure
* @param error the error to encapsulate in this Result
* @see com.yahoo.documentapi.Result.ResultType
*/
public Result(ResultType type, Error error) {
this.type = type;
this.error = error;
this.requestId = 0;
}
/**
* Returns whether this operation is a success.
* If it is a success, the operation is accepted and one or more responses are guaranteed
* to arrive within this sessions timeout limit.
* If this is not a success, this operation has no further consequences.
*
* @return true if success
*/
public boolean isSuccess() { return type == ResultType.SUCCESS; }
/**
* Returns the error causes by this. If this was not a success, this method always returns an error
* If this was a success, this method returns null.
*
* @return the Error, or null
* @deprecated Will be removed on Vespa 8
*/
@Deprecated(forRemoval = true, since="7")
public java.lang.Error getError() { return new java.lang.Error(error.getMessage()); }
public Error error() { return error; }
/**
* Returns the id of this operation. The asynchronous response to this operation
* will contain the same id to allow clients who desire to, to match operations to responses.
*
* @return the id of this operation
*/
public long getRequestId() { return requestId; }
/**
* Returns the type of result.
*
* @return the type of result, typically if this is an error or a success, and what kind of error.
* @see com.yahoo.documentapi.Result.ResultType
*/
public ResultType type() { return type;}
/** The types that a Result can have. */
public enum ResultType {
/** The request was successful, no error information is attached. */
SUCCESS,
/** The request failed, but may be successful if retried at a later time. */
TRANSIENT_ERROR,
/** The request failed, and retrying is pointless. */
FATAL_ERROR,
/** Condition specified in operation not met error */
@Deprecated(since = "7", forRemoval = true)
CONDITION_NOT_MET_ERROR
}
} | class Result {
/** Null if this is a success, set to the error occurring if this is a failure */
private final Error error;
/** The id of this operation */
private final long requestId;
private final ResultType type;
/** Creates a successful result with requestId zero */
public Result() {
this(0);
}
/**
* Creates a successful result
*
* @param requestId the ID of the request
*/
public Result(long requestId) {
this.error = null;
this.requestId = requestId;
type = ResultType.SUCCESS;
}
/**
* Creates a unsuccessful result
*
* @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error
*/
@Deprecated(forRemoval = true, since="7")
public Result(ResultType type, java.lang.Error error) {
this.type = type;
this.error = new Error(0, error.getMessage());
this.requestId = 0;
}
/**
* Creates a unsuccessful result
*
* @param type the type of failure
* @param error the error to encapsulate in this Result
* @see com.yahoo.documentapi.Result.ResultType
*/
public Result(ResultType type, Error error) {
this.type = type;
this.error = error;
this.requestId = 0;
}
/**
* Returns whether this operation is a success.
* If it is a success, the operation is accepted and one or more responses are guaranteed
* to arrive within this sessions timeout limit.
* If this is not a success, this operation has no further consequences.
*
* @return true if success
*/
public boolean isSuccess() { return type == ResultType.SUCCESS; }
/**
* Returns the error causes by this. If this was not a success, this method always returns an error
* If this was a success, this method returns null.
*
* @return the Error, or null
* @deprecated Will be removed on Vespa 8
*/
@Deprecated(forRemoval = true, since="7")
public java.lang.Error getError() { return new java.lang.Error(error.getMessage()); }
public Error error() { return error; }
/**
* Returns the id of this operation. The asynchronous response to this operation
* will contain the same id to allow clients who desire to, to match operations to responses.
*
* @return the id of this operation
*/
public long getRequestId() { return requestId; }
/**
* Returns the type of result.
*
* @return the type of result, typically if this is an error or a success, and what kind of error.
* @see com.yahoo.documentapi.Result.ResultType
*/
public ResultType type() { return type;}
/** The types that a Result can have. */
public enum ResultType {
/** The request was successful, no error information is attached. */
SUCCESS,
/** The request failed, but may be successful if retried at a later time. */
TRANSIENT_ERROR,
/** The request failed, and retrying is pointless. */
FATAL_ERROR,
/** Condition specified in operation not met error */
@Deprecated(since = "7", forRemoval = true)
CONDITION_NOT_MET_ERROR
}
} |
Good points, fixed. | private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
boolean hasVespaParent = false;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && parentArtifact.getGroupId().startsWith("com.yahoo.vespa")) {
hasVespaParent = true;
Version parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n \"compileVersion\": \"%s\",\n \"buildTime\": %d,\n \"hasVespaParent\": %b\n}",
compileVersion,
System.currentTimeMillis(),
hasVespaParent);
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
} | if (parentArtifact != null && parentArtifact.getGroupId().startsWith("com.yahoo.vespa")) { | private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
boolean hasVespaParent = false;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && (parentArtifact.getGroupId().startsWith("com.yahoo.vespa.") || parentArtifact.getGroupId().startsWith("ai.vespa."))) {
hasVespaParent = true;
Version parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n \"compileVersion\": \"%s\",\n \"buildTime\": %d,\n \"hasVespaParent\": %b\n}",
compileVersion,
System.currentTimeMillis(),
hasVespaParent);
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
    // Assemble the deployable application package: copy the source package, stamp
    // build metadata, collect module bundles into components/, then zip the result.
    File applicationPackage = new File(project.getBasedir(), sourceDir);
    File applicationDestination = new File(project.getBasedir(), destinationDir);
    copyApplicationPackage(applicationPackage, applicationDestination);
    addBuildMetaData(applicationDestination);
    File componentsDir = createComponentsDir(applicationDestination);
    // Bundles from this project itself, then from each declared sub-module.
    copyModuleBundles(project.getBasedir(), componentsDir);
    copyBundlesForSubModules(componentsDir);
    try {
        Compression.zipDirectory(applicationDestination, "");
    } catch (Exception e) {
        throw new MojoExecutionException("Failed zipping application.", e);
    }
}
/** Copies the bundle jars built by each existing sub-module of this project into the components directory. */
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
    List<String> modules = emptyListIfNull(project.getModules());
    for (String module : modules) {
        File moduleDir = new File(project.getBasedir(), module);
        if (moduleDir.exists()) {
            copyModuleBundles(moduleDir, componentsDir);
        }
    }
}
/** Ensures a "components" directory exists under the application destination and returns it. */
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
    File componentsDir = new File(applicationDestination, "components");
    componentsDir.mkdirs();
    // mkdirs() can fail silently (e.g. a plain file already occupies the name), so verify the result.
    if ( ! (componentsDir.exists() && componentsDir.isDirectory())) {
        throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
    }
    return componentsDir;
}
/** Recursively copies the source application package into the destination; a no-op when the source is absent. */
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
    if ( ! applicationPackage.exists()) return;
    try {
        FileUtils.copyDirectory(applicationPackage, applicationDestination);
    } catch (IOException e) {
        throw new MojoExecutionException("Failed copying applicationPackage", e);
    }
}
/** Copies deployable bundle jars from the module's target directory into the components directory. */
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
    File moduleTargetDir = new File(moduleDir, "target");
    if (moduleTargetDir.exists()) {
        // Only artifacts with these classifier suffixes are treated as bundles.
        File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
                name.endsWith("-bundle.jar") ||
                name.endsWith("-jar-with-dependencies.jar"));
        // listFiles returns null on I/O error or when the path is not a directory.
        if (bundles == null) return;
        for (File bundle : bundles) {
            try {
                copyFile(bundle, new File(componentsDir, bundle.getName()));
                getLog().info("Copying bundle to application: " + bundle.getName());
            } catch (IOException e) {
                throw new MojoExecutionException("Failed copying bundle " + bundle, e);
            }
        }
    }
}
/** Streams the bytes of {@code source} into {@code destination}, truncating any existing content. */
private void copyFile(File source, File destination) throws IOException {
    try (var in = new FileInputStream(source);
         var out = new FileOutputStream(destination)) {
        in.transferTo(out);
    }
}
/** Returns the given list unchanged, or an immutable empty list when it is null. */
private static <T> List<T> emptyListIfNull(List<T> modules) {
    if (modules == null) return Collections.emptyList();
    return modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
Use `!source.isProduction()`? | private boolean skipSource(NotificationSource source) {
if (source.zoneId().map(z -> z.environment() != Environment.prod).orElse(false)) {
return true;
} else if (source.jobType().map(t -> !t.isProduction()).orElse(false)) {
return true;
}
return false;
} | return false; | private boolean skipSource(NotificationSource source) {
if (source.zoneId().map(z -> z.environment() != Environment.prod).orElse(false)) {
return true;
} else if (source.jobType().map(t -> !t.isProduction()).orElse(false)) {
return true;
}
return false;
} | class Notifier {
private final CuratorDb curatorDb;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, Mailer mailer) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
}
/**
 * Sends the given notifications to all contacts of the source tenant that subscribe
 * to the NOTIFICATIONS audience. No-op when there is nothing to send, when the source
 * is filtered out by {@link #skipSource}, or when the tenant is unknown or not a cloud tenant.
 */
public void dispatch(List<Notification> notifications, NotificationSource source) {
    if (notifications.isEmpty() || skipSource(source)) {
        return;
    }
    // Optional.ifPresent instead of the unidiomatic Optional.stream().forEach.
    curatorDb.readTenant(source.tenant()).ifPresent(tenant -> {
        if (tenant instanceof CloudTenant) {
            var cloudTenant = (CloudTenant) tenant;
            cloudTenant.info().contacts().all().stream()
                    .filter(contact -> contact.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
                    .collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
                    // Map.forEach avoids the redundant entrySet() round-trip.
                    .forEach((type, contacts) -> notifications.forEach(notification -> dispatch(notification, type, contacts)));
        }
    });
}
/** Convenience overload: dispatches a single notification using its own source. */
public void dispatch(Notification notification) {
    dispatch(List.of(notification), notification.source());
}
/** Routes the notification to the handler for the given contact type; only EMAIL is supported so far. */
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
    switch (type) {
        case EMAIL:
            // Cast assumes contacts grouped under type EMAIL are EmailContacts — TODO confirm against TenantContacts.
            dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
            break;
        default:
            throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
    }
}
/** Emails the notification to the given contacts; delivery failures are logged, not propagated. */
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
    List<String> recipients = contacts.stream()
            .map(TenantContacts.EmailContact::email)
            .collect(Collectors.toList());
    try {
        mailer.send(mailOf(notification, recipients));
    } catch (MailerException e) {
        log.log(Level.SEVERE, "Failed sending email", e);
    }
}
/** Renders the notification as an email: level and type in the subject, source plus messages in the body. */
private Mail mailOf(Notification n, Collection<String> recipients) {
    var subject = Text.format("[%s] Vespa Notification for %s", n.level().toString().toUpperCase(), n.type().name());
    var body = new StringBuilder();
    body.append("Source: ").append(n.source().toString()).append("\n")
        .append("\n")
        .append(String.join("\n", n.messages()));
    return new Mail(recipients, subject.toString(), body.toString());
}
} | class Notifier {
private final CuratorDb curatorDb;
private final ZoneRegistry zoneRegistry;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.zoneRegistry = Objects.requireNonNull(zoneRegistry);
this.mailer = Objects.requireNonNull(mailer);
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (notifications.isEmpty()) {
return;
}
if (skipSource(source)) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
mailer.send(mailOf(notification, contacts.stream().map(c -> c.email()).collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
}
}
private Mail mailOf(Notification n, Collection<String> recipients) {
var source = n.source();
var subject = Text.format("[%s] %s Vespa Notification for %s - %s", n.level().toString().toUpperCase(), n.type().name(), source.tenant(), source.application());
var body = new StringBuilder();
body.append("Source: ").append(n.source().toString()).append("\n")
.append("\n")
.append(String.join("\n", n.messages()))
.append("\n")
.append(url(source).toString());
return new Mail(recipients, subject.toString(), body.toString());
}
/**
 * Builds the console dashboard URL most specific to the notification source:
 * a particular job run when job type and run number are present, otherwise the
 * application instance, otherwise the tenant overview.
 */
private URI url(NotificationSource source) {
    if (source.application().isPresent() && source.instance().isPresent()) {
        if (source.jobType().isPresent() && source.runNumber().isPresent()) {
            return zoneRegistry.dashboardUrl(
                    new RunId(ApplicationId.from(source.tenant(),
                                                 source.application().get(),
                                                 source.instance().get()),
                              source.jobType().get(),
                              source.runNumber().getAsLong()));
        }
        return zoneRegistry.dashboardUrl(ApplicationId.from(source.tenant(), source.application().get(), source.instance().get()));
    }
    return zoneRegistry.dashboardUrl(source.tenant());
}
} |
I would like to exclude staging and systemtest for now. Too many emails in the beginning will just be annoying. | private boolean skipSource(NotificationSource source) {
if (source.zoneId().map(z -> z.environment() != Environment.prod).orElse(false)) {
return true;
} else if (source.jobType().map(t -> !t.isProduction()).orElse(false)) {
return true;
}
return false;
} | return false; | private boolean skipSource(NotificationSource source) {
if (source.zoneId().map(z -> z.environment() != Environment.prod).orElse(false)) {
return true;
} else if (source.jobType().map(t -> !t.isProduction()).orElse(false)) {
return true;
}
return false;
} | class Notifier {
private final CuratorDb curatorDb;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, Mailer mailer) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (notifications.isEmpty()) {
return;
}
if (skipSource(source)) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
mailer.send(mailOf(notification, contacts.stream().map(c -> c.email()).collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
}
}
private Mail mailOf(Notification n, Collection<String> recipients) {
var subject = Text.format("[%s] Vespa Notification for %s", n.level().toString().toUpperCase(), n.type().name());
var body = new StringBuilder();
body.append("Source: ").append(n.source().toString()).append("\n")
.append("\n")
.append(String.join("\n", n.messages()));
return new Mail(recipients, subject.toString(), body.toString());
}
} | class Notifier {
private final CuratorDb curatorDb;
private final ZoneRegistry zoneRegistry;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.zoneRegistry = Objects.requireNonNull(zoneRegistry);
this.mailer = Objects.requireNonNull(mailer);
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (notifications.isEmpty()) {
return;
}
if (skipSource(source)) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
mailer.send(mailOf(notification, contacts.stream().map(c -> c.email()).collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
}
}
private Mail mailOf(Notification n, Collection<String> recipients) {
var source = n.source();
var subject = Text.format("[%s] %s Vespa Notification for %s - %s", n.level().toString().toUpperCase(), n.type().name(), source.tenant(), source.application());
var body = new StringBuilder();
body.append("Source: ").append(n.source().toString()).append("\n")
.append("\n")
.append(String.join("\n", n.messages()))
.append("\n")
.append(url(source).toString());
return new Mail(recipients, subject.toString(), body.toString());
}
private URI url(NotificationSource source) {
if (source.application().isPresent() && source.instance().isPresent()) {
if (source.jobType().isPresent() && source.runNumber().isPresent()) {
return zoneRegistry.dashboardUrl(
new RunId(ApplicationId.from(source.tenant(),
source.application().get(),
source.instance().get()),
source.jobType().get(),
source.runNumber().getAsLong()));
}
return zoneRegistry.dashboardUrl(ApplicationId.from(source.tenant(), source.application().get(), source.instance().get()));
}
return zoneRegistry.dashboardUrl(source.tenant());
}
} |
Using Integer here, but Long in other places? I also don't understand what risk, as a number, specifies — but that is perhaps documented somewhere? | public void doExecute() {
applicationZip = firstNonBlank(applicationZip, projectPathOf("target", "application.zip")).orElseThrow();
applicationTestZip = firstNonBlank(applicationTestZip, projectPathOf("target", "application-test.zip")).orElseThrow();
Submission submission = new Submission(optionalOf(repository), optionalOf(branch), optionalOf(commit),
optionalOf(sourceUrl), optionalOf(authorEmail),
Paths.get(applicationZip), Paths.get(applicationTestZip),
optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt),
optionalOf(description));
getLog().info(controller.submit(submission, id.tenant(), id.application()));
} | optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt), | public void doExecute() {
applicationZip = firstNonBlank(applicationZip, projectPathOf("target", "application.zip")).orElseThrow();
applicationTestZip = firstNonBlank(applicationTestZip, projectPathOf("target", "application-test.zip")).orElseThrow();
Submission submission = new Submission(optionalOf(repository), optionalOf(branch), optionalOf(commit),
optionalOf(sourceUrl), optionalOf(authorEmail),
Paths.get(applicationZip), Paths.get(applicationTestZip),
optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt),
optionalOf(description));
getLog().info(controller.submit(submission, id.tenant(), id.application()));
} | class SubmitMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "applicationTestZip")
private String applicationTestZip;
@Parameter(property = "authorEmail")
private String authorEmail;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "sourceUrl")
private String sourceUrl;
@Parameter(property = "projectId")
private String projectId;
@Parameter(property = "risk")
private String risk;
@Parameter(property = "description")
private String description;
@Override
} | class SubmitMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "applicationTestZip")
private String applicationTestZip;
@Parameter(property = "authorEmail")
private String authorEmail;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "sourceUrl")
private String sourceUrl;
@Parameter(property = "projectId")
private String projectId;
@Parameter(property = "risk")
private String risk;
@Parameter(property = "description")
private String description;
@Override
} |
Can't let people get too risky, you know 😛 It will be documented at cloud.vespa.ai/en/reference/vespa-cloud-api. It's a fringe setting! | public void doExecute() {
applicationZip = firstNonBlank(applicationZip, projectPathOf("target", "application.zip")).orElseThrow();
applicationTestZip = firstNonBlank(applicationTestZip, projectPathOf("target", "application-test.zip")).orElseThrow();
Submission submission = new Submission(optionalOf(repository), optionalOf(branch), optionalOf(commit),
optionalOf(sourceUrl), optionalOf(authorEmail),
Paths.get(applicationZip), Paths.get(applicationTestZip),
optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt),
optionalOf(description));
getLog().info(controller.submit(submission, id.tenant(), id.application()));
} | optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt), | public void doExecute() {
applicationZip = firstNonBlank(applicationZip, projectPathOf("target", "application.zip")).orElseThrow();
applicationTestZip = firstNonBlank(applicationTestZip, projectPathOf("target", "application-test.zip")).orElseThrow();
Submission submission = new Submission(optionalOf(repository), optionalOf(branch), optionalOf(commit),
optionalOf(sourceUrl), optionalOf(authorEmail),
Paths.get(applicationZip), Paths.get(applicationTestZip),
optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt),
optionalOf(description));
getLog().info(controller.submit(submission, id.tenant(), id.application()));
} | class SubmitMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "applicationTestZip")
private String applicationTestZip;
@Parameter(property = "authorEmail")
private String authorEmail;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "sourceUrl")
private String sourceUrl;
@Parameter(property = "projectId")
private String projectId;
@Parameter(property = "risk")
private String risk;
@Parameter(property = "description")
private String description;
@Override
} | class SubmitMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "applicationTestZip")
private String applicationTestZip;
@Parameter(property = "authorEmail")
private String authorEmail;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "sourceUrl")
private String sourceUrl;
@Parameter(property = "projectId")
private String projectId;
@Parameter(property = "risk")
private String risk;
@Parameter(property = "description")
private String description;
@Override
} |
It broke because the trailing `.` was added ... | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?"); | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
    if ( ! applicationDestination.exists()) return;
    // Default to the version of this plugin itself when no explicit version is configured.
    if (vespaversion == null)
        vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
    Version compileVersion = Version.from(vespaversion);
    // Climb to the outermost ancestor project that still declares a parent artifact.
    MavenProject current = project;
    while (current.getParent() != null && current.getParent().getParentArtifact() != null)
        current = current.getParent();
    Version parentVersion = null;
    Artifact parentArtifact = current.getParentArtifact();
    if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
        parentVersion = Version.from(parentArtifact.getVersion());
        // The compile version must not be newer than the Vespa parent built against.
        if (parentVersion.compareTo(compileVersion) < 0)
            throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
    }
    // parentVersion is written as a quoted JSON string, or as the literal null when no Vespa parent exists.
    String metaData = String.format("{\n" +
                                    " \"compileVersion\": \"%s\",\n" +
                                    " \"buildTime\": %d,\n" +
                                    " \"parentVersion\": %s\n" +
                                    "}",
                                    compileVersion,
                                    System.currentTimeMillis(),
                                    parentVersion == null ? null : "\"" + parentVersion + "\"");
    try {
        Files.write(applicationDestination.toPath().resolve("build-meta.json"),
                    metaData.getBytes(StandardCharsets.UTF_8));
    }
    catch (IOException e) {
        throw new MojoExecutionException("Failed writing compile version and build time.", e);
    }
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
Remove system? The issue could be with any type of tests or even just test package layout. | static TestSummary validateTests(Collection<Suite> expectedSuites, byte[] testPackage) {
List<String> problems = new ArrayList<>();
Set<Suite> suites = new LinkedHashSet<>();
ZipEntries.from(testPackage, __ -> true, 0, false).asList().stream()
.map(entry -> Path.fromString(entry.name()))
.collect(groupingBy(path -> path.elements().size() > 1 ? path.elements().get(0) : "",
mapping(path -> (path.elements().size() > 1 ? path.getChildPath() : path).getRelative(), toList())))
.forEach((directory, paths) -> {
switch (directory) {
case "components": {
for (String path : paths) {
if (path.endsWith("-tests.jar")) {
try {
byte[] testsJar = ZipEntries.readFile(testPackage, "components/" + path, 1 << 30);
Manifest manifest = new JarInputStream(new ByteArrayInputStream(testsJar)).getManifest();
for (String suite : manifest.getMainAttributes().getValue("X-JDisc-Test-Bundle-Categories").split(","))
switch (suite.trim()) {
case "SystemTest": suites.add(system); break;
case "StagingSetup": suites.add(staging_setup); break;
case "StagingTest": suites.add(staging); break;
case "ProductionTest": suites.add(production); break;
default: problems.add("unexpected test suite name '" + suite + "' in bundle manifest");
}
}
catch (Exception e) {
problems.add("failed reading test bundle manifest: " + Exceptions.toMessageString(e));
}
}
}
}
break;
case "tests": {
if (paths.stream().anyMatch(Pattern.compile("system-test/.+\\.json").asMatchPredicate())) suites.add(system);
if (paths.stream().anyMatch(Pattern.compile("staging-setup/.+\\.json").asMatchPredicate())) suites.add(staging_setup);
if (paths.stream().anyMatch(Pattern.compile("staging-test/.+\\.json").asMatchPredicate())) suites.add(staging);
if (paths.stream().anyMatch(Pattern.compile("production-test/.+\\.json").asMatchPredicate())) suites.add(production);
}
break;
case "artifacts": {
if (paths.stream().anyMatch(Pattern.compile(".+-tests.jar").asMatchPredicate()))
suites.addAll(expectedSuites);
for (String forbidden : List.of("key", "cert"))
if (paths.contains(forbidden))
problems.add("test package contains 'artifacts/" + forbidden +
"'; this conflicts with credentials used to run tests in Vespa Cloud");
}
break;
}
});
if (expectedSuites.contains(system) && ! suites.contains(system))
problems.add("test package has no system tests, but <test /> is declared in deployment.xml");
if (suites.contains(staging) != suites.contains(staging_setup))
problems.add("test package has " + (suites.contains(staging) ? "staging tests" : "staging setup") +
", so it should also include " + (suites.contains(staging) ? "staging setup" : "staging tests"));
else if (expectedSuites.contains(staging) && ! suites.contains(staging))
problems.add("test package has no staging setup and tests, but <staging /> is declared in deployment.xml");
if (suites.contains(production) != expectedSuites.contains(production))
problems.add("test package has " + (suites.contains(production) ? "" : "no ") + "production tests, " +
"but " + (suites.contains(production) ? "no " : "") + "production tests are declared in deployment.xml");
if ( ! problems.isEmpty())
problems.add("see https:
return new TestSummary(problems, suites);
} | problems.add("see https: | static TestSummary validateTests(Collection<Suite> expectedSuites, byte[] testPackage) {
List<String> problems = new ArrayList<>();
Set<Suite> suites = new LinkedHashSet<>();
ZipEntries.from(testPackage, __ -> true, 0, false).asList().stream()
.map(entry -> Path.fromString(entry.name()))
.collect(groupingBy(path -> path.elements().size() > 1 ? path.elements().get(0) : "",
mapping(path -> (path.elements().size() > 1 ? path.getChildPath() : path).getRelative(), toList())))
.forEach((directory, paths) -> {
switch (directory) {
case "components": {
for (String path : paths) {
if (path.endsWith("-tests.jar")) {
try {
byte[] testsJar = ZipEntries.readFile(testPackage, "components/" + path, 1 << 30);
Manifest manifest = new JarInputStream(new ByteArrayInputStream(testsJar)).getManifest();
for (String suite : manifest.getMainAttributes().getValue("X-JDisc-Test-Bundle-Categories").split(","))
switch (suite.trim()) {
case "SystemTest": suites.add(system); break;
case "StagingSetup": suites.add(staging_setup); break;
case "StagingTest": suites.add(staging); break;
case "ProductionTest": suites.add(production); break;
default: problems.add("unexpected test suite name '" + suite + "' in bundle manifest");
}
}
catch (Exception e) {
problems.add("failed reading test bundle manifest: " + Exceptions.toMessageString(e));
}
}
}
}
break;
case "tests": {
if (paths.stream().anyMatch(Pattern.compile("system-test/.+\\.json").asMatchPredicate())) suites.add(system);
if (paths.stream().anyMatch(Pattern.compile("staging-setup/.+\\.json").asMatchPredicate())) suites.add(staging_setup);
if (paths.stream().anyMatch(Pattern.compile("staging-test/.+\\.json").asMatchPredicate())) suites.add(staging);
if (paths.stream().anyMatch(Pattern.compile("production-test/.+\\.json").asMatchPredicate())) suites.add(production);
}
break;
case "artifacts": {
if (paths.stream().anyMatch(Pattern.compile(".+-tests.jar").asMatchPredicate()))
suites.addAll(expectedSuites);
for (String forbidden : List.of("key", "cert"))
if (paths.contains(forbidden))
problems.add("test package contains 'artifacts/" + forbidden +
"'; this conflicts with credentials used to run tests in Vespa Cloud");
}
break;
}
});
if (expectedSuites.contains(system) && ! suites.contains(system))
problems.add("test package has no system tests, but <test /> is declared in deployment.xml");
if (suites.contains(staging) != suites.contains(staging_setup))
problems.add("test package has " + (suites.contains(staging) ? "staging tests" : "staging setup") +
", so it should also include " + (suites.contains(staging) ? "staging setup" : "staging tests"));
else if (expectedSuites.contains(staging) && ! suites.contains(staging))
problems.add("test package has no staging setup and tests, but <staging /> is declared in deployment.xml");
if (suites.contains(production) != expectedSuites.contains(production))
problems.add("test package has " + (suites.contains(production) ? "" : "no ") + "production tests, " +
"but " + (suites.contains(production) ? "no " : "") + "production tests are declared in deployment.xml");
if ( ! problems.isEmpty())
problems.add("see https:
return new TestSummary(problems, suites);
} | class TestPackage {
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private final ApplicationPackage applicationPackage;
private final X509Certificate certificate;
public TestPackage(byte[] testPackage, boolean isPublicSystem, RunId id, Testerapp testerApp,
DeploymentSpec spec, Instant certificateValidFrom, Duration certificateValidDuration) {
Map<String, byte[]> entries = new HashMap<>();
entries.put("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put("tests/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put(servicesFile,
servicesXml( ! isPublicSystem,
certificateValidFrom != null,
testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
testerApp));
entries.put(deploymentFile,
deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance())
.athenzService(id.type().zone().environment(), id.type().zone().region())));
if (certificateValidFrom != null) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
this.certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
certificateValidFrom,
certificateValidFrom.plus(certificateValidDuration),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
entries.put("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
entries.put("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
else {
this.certificate = null;
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream(testPackage.length + 10_000);
transferAndWrite(buffer, new ByteArrayInputStream(testPackage), entries);
this.applicationPackage = new ApplicationPackage(buffer.toByteArray());
}
public ApplicationPackage asApplicationPackage() {
return applicationPackage;
}
public X509Certificate certificate() {
return Objects.requireNonNull(certificate);
}
public static TestSummary validateTests(DeploymentSpec spec, byte[] testPackage) {
return validateTests(expectedSuites(spec.steps()), testPackage);
}
public static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
: DEFAULT_TESTER_RESOURCES);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
/** Returns the generated services.xml content for the tester application. */
public static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources, ControllerConfig.Steprunner.Testerapp config) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <testsPath>tests</testsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <nodes count=\"1\">\n" +
" <jvm allocated-memory=\"" + jdiscMemoryPct + "%\"/>\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
public static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
static Set<Suite> expectedSuites(List<Step> steps) {
Set<Suite> suites = new HashSet<>();
if (steps.isEmpty()) return suites;
for (Step step : steps) {
if (step.isTest()) {
if (step.concerns(Environment.prod)) suites.add(production);
if (step.concerns(Environment.test)) suites.add(system);
if (step.concerns(Environment.staging)) { suites.add(staging); suites.add(staging_setup); }
}
else
suites.addAll(expectedSuites(step.steps()));
}
return suites;
}
public static class TestSummary {
private final List<String> problems;
private final List<Suite> suites;
public TestSummary(List<String> problems, Set<Suite> suites) {
this.problems = List.copyOf(problems);
this.suites = List.copyOf(suites);
}
public List<String> problems() {
return problems;
}
public List<Suite> suites() {
return suites;
}
}
} | class TestPackage {
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private final ApplicationPackage applicationPackage;
private final X509Certificate certificate;
public TestPackage(byte[] testPackage, boolean isPublicSystem, RunId id, Testerapp testerApp,
DeploymentSpec spec, Instant certificateValidFrom, Duration certificateValidDuration) {
Map<String, byte[]> entries = new HashMap<>();
entries.put("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put("tests/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put(servicesFile,
servicesXml( ! isPublicSystem,
certificateValidFrom != null,
testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
testerApp));
entries.put(deploymentFile,
deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance())
.athenzService(id.type().zone().environment(), id.type().zone().region())));
if (certificateValidFrom != null) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
this.certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
certificateValidFrom,
certificateValidFrom.plus(certificateValidDuration),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
entries.put("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
entries.put("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
else {
this.certificate = null;
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream(testPackage.length + 10_000);
transferAndWrite(buffer, new ByteArrayInputStream(testPackage), entries);
this.applicationPackage = new ApplicationPackage(buffer.toByteArray());
}
public ApplicationPackage asApplicationPackage() {
return applicationPackage;
}
public X509Certificate certificate() {
return Objects.requireNonNull(certificate);
}
public static TestSummary validateTests(DeploymentSpec spec, byte[] testPackage) {
return validateTests(expectedSuites(spec.steps()), testPackage);
}
public static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
: DEFAULT_TESTER_RESOURCES);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
/** Returns the generated services.xml content for the tester application. */
public static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources, ControllerConfig.Steprunner.Testerapp config) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <testsPath>tests</testsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <nodes count=\"1\">\n" +
" <jvm allocated-memory=\"" + jdiscMemoryPct + "%\"/>\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
public static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
static Set<Suite> expectedSuites(List<Step> steps) {
Set<Suite> suites = new HashSet<>();
if (steps.isEmpty()) return suites;
for (Step step : steps) {
if (step.isTest()) {
if (step.concerns(Environment.prod)) suites.add(production);
if (step.concerns(Environment.test)) suites.add(system);
if (step.concerns(Environment.staging)) { suites.add(staging); suites.add(staging_setup); }
}
else
suites.addAll(expectedSuites(step.steps()));
}
return suites;
}
public static class TestSummary {
private final List<String> problems;
private final List<Suite> suites;
public TestSummary(List<String> problems, Set<Suite> suites) {
this.problems = List.copyOf(problems);
this.suites = List.copyOf(suites);
}
public List<String> problems() {
return problems;
}
public List<Suite> suites() {
return suites;
}
}
} |
Sure. | static TestSummary validateTests(Collection<Suite> expectedSuites, byte[] testPackage) {
List<String> problems = new ArrayList<>();
Set<Suite> suites = new LinkedHashSet<>();
ZipEntries.from(testPackage, __ -> true, 0, false).asList().stream()
.map(entry -> Path.fromString(entry.name()))
.collect(groupingBy(path -> path.elements().size() > 1 ? path.elements().get(0) : "",
mapping(path -> (path.elements().size() > 1 ? path.getChildPath() : path).getRelative(), toList())))
.forEach((directory, paths) -> {
switch (directory) {
case "components": {
for (String path : paths) {
if (path.endsWith("-tests.jar")) {
try {
byte[] testsJar = ZipEntries.readFile(testPackage, "components/" + path, 1 << 30);
Manifest manifest = new JarInputStream(new ByteArrayInputStream(testsJar)).getManifest();
for (String suite : manifest.getMainAttributes().getValue("X-JDisc-Test-Bundle-Categories").split(","))
switch (suite.trim()) {
case "SystemTest": suites.add(system); break;
case "StagingSetup": suites.add(staging_setup); break;
case "StagingTest": suites.add(staging); break;
case "ProductionTest": suites.add(production); break;
default: problems.add("unexpected test suite name '" + suite + "' in bundle manifest");
}
}
catch (Exception e) {
problems.add("failed reading test bundle manifest: " + Exceptions.toMessageString(e));
}
}
}
}
break;
case "tests": {
if (paths.stream().anyMatch(Pattern.compile("system-test/.+\\.json").asMatchPredicate())) suites.add(system);
if (paths.stream().anyMatch(Pattern.compile("staging-setup/.+\\.json").asMatchPredicate())) suites.add(staging_setup);
if (paths.stream().anyMatch(Pattern.compile("staging-test/.+\\.json").asMatchPredicate())) suites.add(staging);
if (paths.stream().anyMatch(Pattern.compile("production-test/.+\\.json").asMatchPredicate())) suites.add(production);
}
break;
case "artifacts": {
if (paths.stream().anyMatch(Pattern.compile(".+-tests.jar").asMatchPredicate()))
suites.addAll(expectedSuites);
for (String forbidden : List.of("key", "cert"))
if (paths.contains(forbidden))
problems.add("test package contains 'artifacts/" + forbidden +
"'; this conflicts with credentials used to run tests in Vespa Cloud");
}
break;
}
});
if (expectedSuites.contains(system) && ! suites.contains(system))
problems.add("test package has no system tests, but <test /> is declared in deployment.xml");
if (suites.contains(staging) != suites.contains(staging_setup))
problems.add("test package has " + (suites.contains(staging) ? "staging tests" : "staging setup") +
", so it should also include " + (suites.contains(staging) ? "staging setup" : "staging tests"));
else if (expectedSuites.contains(staging) && ! suites.contains(staging))
problems.add("test package has no staging setup and tests, but <staging /> is declared in deployment.xml");
if (suites.contains(production) != expectedSuites.contains(production))
problems.add("test package has " + (suites.contains(production) ? "" : "no ") + "production tests, " +
"but " + (suites.contains(production) ? "no " : "") + "production tests are declared in deployment.xml");
if ( ! problems.isEmpty())
problems.add("see https:
return new TestSummary(problems, suites);
} | problems.add("see https: | static TestSummary validateTests(Collection<Suite> expectedSuites, byte[] testPackage) {
List<String> problems = new ArrayList<>();
Set<Suite> suites = new LinkedHashSet<>();
ZipEntries.from(testPackage, __ -> true, 0, false).asList().stream()
.map(entry -> Path.fromString(entry.name()))
.collect(groupingBy(path -> path.elements().size() > 1 ? path.elements().get(0) : "",
mapping(path -> (path.elements().size() > 1 ? path.getChildPath() : path).getRelative(), toList())))
.forEach((directory, paths) -> {
switch (directory) {
case "components": {
for (String path : paths) {
if (path.endsWith("-tests.jar")) {
try {
byte[] testsJar = ZipEntries.readFile(testPackage, "components/" + path, 1 << 30);
Manifest manifest = new JarInputStream(new ByteArrayInputStream(testsJar)).getManifest();
for (String suite : manifest.getMainAttributes().getValue("X-JDisc-Test-Bundle-Categories").split(","))
switch (suite.trim()) {
case "SystemTest": suites.add(system); break;
case "StagingSetup": suites.add(staging_setup); break;
case "StagingTest": suites.add(staging); break;
case "ProductionTest": suites.add(production); break;
default: problems.add("unexpected test suite name '" + suite + "' in bundle manifest");
}
}
catch (Exception e) {
problems.add("failed reading test bundle manifest: " + Exceptions.toMessageString(e));
}
}
}
}
break;
case "tests": {
if (paths.stream().anyMatch(Pattern.compile("system-test/.+\\.json").asMatchPredicate())) suites.add(system);
if (paths.stream().anyMatch(Pattern.compile("staging-setup/.+\\.json").asMatchPredicate())) suites.add(staging_setup);
if (paths.stream().anyMatch(Pattern.compile("staging-test/.+\\.json").asMatchPredicate())) suites.add(staging);
if (paths.stream().anyMatch(Pattern.compile("production-test/.+\\.json").asMatchPredicate())) suites.add(production);
}
break;
case "artifacts": {
if (paths.stream().anyMatch(Pattern.compile(".+-tests.jar").asMatchPredicate()))
suites.addAll(expectedSuites);
for (String forbidden : List.of("key", "cert"))
if (paths.contains(forbidden))
problems.add("test package contains 'artifacts/" + forbidden +
"'; this conflicts with credentials used to run tests in Vespa Cloud");
}
break;
}
});
if (expectedSuites.contains(system) && ! suites.contains(system))
problems.add("test package has no system tests, but <test /> is declared in deployment.xml");
if (suites.contains(staging) != suites.contains(staging_setup))
problems.add("test package has " + (suites.contains(staging) ? "staging tests" : "staging setup") +
", so it should also include " + (suites.contains(staging) ? "staging setup" : "staging tests"));
else if (expectedSuites.contains(staging) && ! suites.contains(staging))
problems.add("test package has no staging setup and tests, but <staging /> is declared in deployment.xml");
if (suites.contains(production) != expectedSuites.contains(production))
problems.add("test package has " + (suites.contains(production) ? "" : "no ") + "production tests, " +
"but " + (suites.contains(production) ? "no " : "") + "production tests are declared in deployment.xml");
if ( ! problems.isEmpty())
problems.add("see https:
return new TestSummary(problems, suites);
} | class TestPackage {
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private final ApplicationPackage applicationPackage;
private final X509Certificate certificate;
public TestPackage(byte[] testPackage, boolean isPublicSystem, RunId id, Testerapp testerApp,
DeploymentSpec spec, Instant certificateValidFrom, Duration certificateValidDuration) {
Map<String, byte[]> entries = new HashMap<>();
entries.put("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put("tests/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put(servicesFile,
servicesXml( ! isPublicSystem,
certificateValidFrom != null,
testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
testerApp));
entries.put(deploymentFile,
deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance())
.athenzService(id.type().zone().environment(), id.type().zone().region())));
if (certificateValidFrom != null) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
this.certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
certificateValidFrom,
certificateValidFrom.plus(certificateValidDuration),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
entries.put("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
entries.put("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
else {
this.certificate = null;
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream(testPackage.length + 10_000);
transferAndWrite(buffer, new ByteArrayInputStream(testPackage), entries);
this.applicationPackage = new ApplicationPackage(buffer.toByteArray());
}
public ApplicationPackage asApplicationPackage() {
return applicationPackage;
}
public X509Certificate certificate() {
return Objects.requireNonNull(certificate);
}
public static TestSummary validateTests(DeploymentSpec spec, byte[] testPackage) {
return validateTests(expectedSuites(spec.steps()), testPackage);
}
public static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
: DEFAULT_TESTER_RESOURCES);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
/** Returns the generated services.xml content for the tester application. */
public static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources, ControllerConfig.Steprunner.Testerapp config) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <testsPath>tests</testsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <nodes count=\"1\">\n" +
" <jvm allocated-memory=\"" + jdiscMemoryPct + "%\"/>\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
public static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
static Set<Suite> expectedSuites(List<Step> steps) {
Set<Suite> suites = new HashSet<>();
if (steps.isEmpty()) return suites;
for (Step step : steps) {
if (step.isTest()) {
if (step.concerns(Environment.prod)) suites.add(production);
if (step.concerns(Environment.test)) suites.add(system);
if (step.concerns(Environment.staging)) { suites.add(staging); suites.add(staging_setup); }
}
else
suites.addAll(expectedSuites(step.steps()));
}
return suites;
}
public static class TestSummary {
private final List<String> problems;
private final List<Suite> suites;
public TestSummary(List<String> problems, Set<Suite> suites) {
this.problems = List.copyOf(problems);
this.suites = List.copyOf(suites);
}
public List<String> problems() {
return problems;
}
public List<Suite> suites() {
return suites;
}
}
} | class TestPackage {
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private final ApplicationPackage applicationPackage;
private final X509Certificate certificate;
public TestPackage(byte[] testPackage, boolean isPublicSystem, RunId id, Testerapp testerApp,
DeploymentSpec spec, Instant certificateValidFrom, Duration certificateValidDuration) {
Map<String, byte[]> entries = new HashMap<>();
entries.put("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put("tests/.ignore-" + UUID.randomUUID(), new byte[0]);
entries.put(servicesFile,
servicesXml( ! isPublicSystem,
certificateValidFrom != null,
testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
testerApp));
entries.put(deploymentFile,
deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance())
.athenzService(id.type().zone().environment(), id.type().zone().region())));
if (certificateValidFrom != null) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
this.certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
certificateValidFrom,
certificateValidFrom.plus(certificateValidDuration),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
entries.put("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
entries.put("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
else {
this.certificate = null;
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream(testPackage.length + 10_000);
transferAndWrite(buffer, new ByteArrayInputStream(testPackage), entries);
this.applicationPackage = new ApplicationPackage(buffer.toByteArray());
}
public ApplicationPackage asApplicationPackage() {
return applicationPackage;
}
public X509Certificate certificate() {
return Objects.requireNonNull(certificate);
}
public static TestSummary validateTests(DeploymentSpec spec, byte[] testPackage) {
return validateTests(expectedSuites(spec.steps()), testPackage);
}
public static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
: DEFAULT_TESTER_RESOURCES);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
/** Returns the generated services.xml content for the tester application. */
public static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources, ControllerConfig.Steprunner.Testerapp config) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
" <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <testsPath>tests</testsPath>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <nodes count=\"1\">\n" +
" <jvm allocated-memory=\"" + jdiscMemoryPct + "%\"/>\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
public static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
static Set<Suite> expectedSuites(List<Step> steps) {
Set<Suite> suites = new HashSet<>();
if (steps.isEmpty()) return suites;
for (Step step : steps) {
if (step.isTest()) {
if (step.concerns(Environment.prod)) suites.add(production);
if (step.concerns(Environment.test)) suites.add(system);
if (step.concerns(Environment.staging)) { suites.add(staging); suites.add(staging_setup); }
}
else
suites.addAll(expectedSuites(step.steps()));
}
return suites;
}
public static class TestSummary {
private final List<String> problems;
private final List<Suite> suites;
public TestSummary(List<String> problems, Set<Suite> suites) {
this.problems = List.copyOf(problems);
this.suites = List.copyOf(suites);
}
public List<String> problems() {
return problems;
}
public List<Suite> suites() {
return suites;
}
}
} |
Add `^` to avoid matching with example.com.ai.vespa? | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?"); | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
`String::matches` will match the entire string | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?"); | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
Urgh, yes, it was "startsWith" earlier. | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?"); | static boolean isVespaParent(String groupId) {
return groupId.matches("(com\\.yahoo\\.vespa|ai\\.vespa)(\\..+)?");
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} | class ApplicationMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
protected MavenProject project;
@Parameter(property = "vespaversion")
private String vespaversion;
@Parameter(defaultValue = "src/main/application")
private String sourceDir;
@Parameter(defaultValue = "target/application")
private String destinationDir;
@Override
public void execute() throws MojoExecutionException {
File applicationPackage = new File(project.getBasedir(), sourceDir);
File applicationDestination = new File(project.getBasedir(), destinationDir);
copyApplicationPackage(applicationPackage, applicationDestination);
addBuildMetaData(applicationDestination);
File componentsDir = createComponentsDir(applicationDestination);
copyModuleBundles(project.getBasedir(), componentsDir);
copyBundlesForSubModules(componentsDir);
try {
Compression.zipDirectory(applicationDestination, "");
} catch (Exception e) {
throw new MojoExecutionException("Failed zipping application.", e);
}
}
/** Writes meta data about this package if the destination directory exists. */
private void addBuildMetaData(File applicationDestination) throws MojoExecutionException {
if ( ! applicationDestination.exists()) return;
if (vespaversion == null)
vespaversion = project.getPlugin("com.yahoo.vespa:vespa-application-maven-plugin").getVersion();
Version compileVersion = Version.from(vespaversion);
MavenProject current = project;
while (current.getParent() != null && current.getParent().getParentArtifact() != null)
current = current.getParent();
Version parentVersion = null;
Artifact parentArtifact = current.getParentArtifact();
if (parentArtifact != null && isVespaParent(parentArtifact.getGroupId())) {
parentVersion = Version.from(parentArtifact.getVersion());
if (parentVersion.compareTo(compileVersion) < 0)
throw new IllegalArgumentException("compile version (" + compileVersion + ") cannot be higher than parent version (" + parentVersion + ")");
}
String metaData = String.format("{\n" +
" \"compileVersion\": \"%s\",\n" +
" \"buildTime\": %d,\n" +
" \"parentVersion\": %s\n" +
"}",
compileVersion,
System.currentTimeMillis(),
parentVersion == null ? null : "\"" + parentVersion + "\"");
try {
Files.write(applicationDestination.toPath().resolve("build-meta.json"),
metaData.getBytes(StandardCharsets.UTF_8));
}
catch (IOException e) {
throw new MojoExecutionException("Failed writing compile version and build time.", e);
}
}
private void copyBundlesForSubModules(File componentsDir) throws MojoExecutionException {
List<String> modules = emptyListIfNull(project.getModules());
for (String module : modules) {
File moduleDir = new File(project.getBasedir(), module);
if (moduleDir.exists()) {
copyModuleBundles(moduleDir, componentsDir);
}
}
}
private File createComponentsDir(File applicationDestination) throws MojoExecutionException {
File componentsDir = new File(applicationDestination, "components");
componentsDir.mkdirs();
if (!componentsDir.exists() || !componentsDir.isDirectory()) {
throw new MojoExecutionException("Failed creating components directory (" + componentsDir + ")");
}
return componentsDir;
}
private void copyApplicationPackage(File applicationPackage, File applicationDestination) throws MojoExecutionException {
if (applicationPackage.exists()) {
try {
FileUtils.copyDirectory(applicationPackage, applicationDestination);
} catch (IOException e) {
throw new MojoExecutionException("Failed copying applicationPackage", e);
}
}
}
private void copyModuleBundles(File moduleDir, File componentsDir) throws MojoExecutionException {
File moduleTargetDir = new File(moduleDir, "target");
if (moduleTargetDir.exists()) {
File[] bundles = moduleTargetDir.listFiles((dir, name) -> name.endsWith("-deploy.jar") ||
name.endsWith("-bundle.jar") ||
name.endsWith("-jar-with-dependencies.jar"));
if (bundles == null) return;
for (File bundle : bundles) {
try {
copyFile(bundle, new File(componentsDir, bundle.getName()));
getLog().info("Copying bundle to application: " + bundle.getName());
} catch (IOException e) {
throw new MojoExecutionException("Failed copying bundle " + bundle, e);
}
}
}
}
private void copyFile(File source, File destination) throws IOException {
try (FileInputStream sourceStream = new FileInputStream(source);
FileOutputStream destinationStream = new FileOutputStream(destination)) {
sourceStream.transferTo(destinationStream);
}
}
private static <T> List<T> emptyListIfNull(List<T> modules) {
return modules == null ? Collections.emptyList(): modules;
}
} |
Can probably remove this comment now | private static Set<Metric> getDistributorMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
} | private static Set<Metric> getDistributorMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
} | class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDistributorMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getSentinelMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
private static Set<Metric> getSentinelMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("sentinel.restarts.count"));
metrics.add(new Metric("sentinel.totalRestarts.last"));
metrics.add(new Metric("sentinel.uptime.last"));
metrics.add(new Metric("sentinel.running.count"));
metrics.add(new Metric("sentinel.running.last"));
return metrics;
}
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count"));
metrics.add(new Metric("logd.processed.lines.count"));
metrics.add(new Metric("worker.connections.max"));
metrics.add(new Metric("endpoint.certificate.expiry.seconds"));
metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));
metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.tls-connections-broken"));
metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
metrics.add(new Metric("vds.server.fnet.num-connections"));
metrics.add(new Metric("node-certificate.expiry.seconds"));
return metrics;
}
private static Set<Metric> getConfigServerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count"));
metrics.add(new Metric("configserver.failedRequests.count"));
metrics.add(new Metric("configserver.latency.max"));
metrics.add(new Metric("configserver.latency.sum"));
metrics.add(new Metric("configserver.latency.count"));
metrics.add(new Metric("configserver.latency.average"));
metrics.add(new Metric("configserver.cacheConfigElems.last"));
metrics.add(new Metric("configserver.cacheChecksumElems.last"));
metrics.add(new Metric("configserver.hosts.last"));
metrics.add(new Metric("configserver.delayedResponses.count"));
metrics.add(new Metric("configserver.sessionChangeErrors.count"));
metrics.add(new Metric("configserver.zkZNodes.last"));
metrics.add(new Metric("configserver.zkAvgLatency.last"));
metrics.add(new Metric("configserver.zkMaxLatency.last"));
metrics.add(new Metric("configserver.zkConnections.last"));
metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
return metrics;
}
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.rejected_tasks", suffixes);
addMetric(metrics, "jdisc.thread_pool.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.max_allowed_size", suffixes);
addMetric(metrics, "jdisc.thread_pool.active_threads", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.min", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", suffixes);
}
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("mem.direct.total.average"));
metrics.add(new Metric("mem.direct.free.average"));
metrics.add(new Metric("mem.direct.used.average"));
metrics.add(new Metric("mem.direct.used.max"));
metrics.add(new Metric("mem.direct.count.max"));
metrics.add(new Metric("mem.native.total.average"));
metrics.add(new Metric("mem.native.free.average"));
metrics.add(new Metric("mem.native.used.average"));
metrics.add(new Metric("mem.native.used.max"));
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.work-ms.last"));
metrics.add(new Metric("cluster-controller.work-ms.sum"));
metrics.add(new Metric("cluster-controller.work-ms.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
metrics.add(new Metric("reindexing.progress.last"));
return metrics;
}
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate"));
return metrics;
}
private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max"));
metrics.add(new Metric("search_connections.max"));
metrics.add(new Metric("search_connections.sum"));
metrics.add(new Metric("search_connections.count"));
metrics.add(new Metric("search_connections.average"));
metrics.add(new Metric("feed.latency.max"));
metrics.add(new Metric("feed.latency.sum"));
metrics.add(new Metric("feed.latency.count"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("feed.http-requests.count"));
metrics.add(new Metric("feed.http-requests.rate"));
metrics.add(new Metric("queries.rate"));
metrics.add(new Metric("query_container_latency.max"));
metrics.add(new Metric("query_container_latency.sum"));
metrics.add(new Metric("query_container_latency.count"));
metrics.add(new Metric("query_container_latency.average"));
metrics.add(new Metric("query_latency.max"));
metrics.add(new Metric("query_latency.sum"));
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.average"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
metrics.add(new Metric("hits_per_query.max"));
metrics.add(new Metric("hits_per_query.sum"));
metrics.add(new Metric("hits_per_query.count"));
metrics.add(new Metric("hits_per_query.average"));
metrics.add(new Metric("hits_per_query.95percentile"));
metrics.add(new Metric("hits_per_query.99percentile"));
metrics.add(new Metric("query_hit_offset.max"));
metrics.add(new Metric("query_hit_offset.sum"));
metrics.add(new Metric("query_hit_offset.count"));
metrics.add(new Metric("documents_covered.count"));
metrics.add(new Metric("documents_total.count"));
metrics.add(new Metric("dispatch_internal.rate"));
metrics.add(new Metric("dispatch_fdispatch.rate"));
addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
addMetric(metrics, "query_item_count", Set.of("max", "sum", "count"));
metrics.add(new Metric("totalhits_per_query.max"));
metrics.add(new Metric("totalhits_per_query.sum"));
metrics.add(new Metric("totalhits_per_query.count"));
metrics.add(new Metric("totalhits_per_query.average"));
metrics.add(new Metric("totalhits_per_query.95percentile"));
metrics.add(new Metric("totalhits_per_query.99percentile"));
metrics.add(new Metric("empty_results.rate"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("relevance.at_1.sum"));
metrics.add(new Metric("relevance.at_1.count"));
metrics.add(new Metric("relevance.at_1.average"));
metrics.add(new Metric("relevance.at_3.sum"));
metrics.add(new Metric("relevance.at_3.count"));
metrics.add(new Metric("relevance.at_3.average"));
metrics.add(new Metric("relevance.at_10.sum"));
metrics.add(new Metric("relevance.at_10.count"));
metrics.add(new Metric("relevance.at_10.average"));
metrics.add(new Metric("error.timeout.rate"));
metrics.add(new Metric("error.backends_oos.rate"));
metrics.add(new Metric("error.plugin_failure.rate"));
metrics.add(new Metric("error.backend_communication_error.rate"));
metrics.add(new Metric("error.empty_document_summaries.rate"));
metrics.add(new Metric("error.invalid_query_parameter.rate"));
metrics.add(new Metric("error.internal_server_error.rate"));
metrics.add(new Metric("error.misconfigured_server.rate"));
metrics.add(new Metric("error.invalid_query_transformation.rate"));
metrics.add(new Metric("error.result_with_errors.rate"));
metrics.add(new Metric("error.unspecified.rate"));
metrics.add(new Metric("error.unhandled_exception.rate"));
return metrics;
}
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
metrics.add(new Metric(prefix + ".queuesize.max"));
metrics.add(new Metric(prefix + ".queuesize.sum"));
metrics.add(new Metric(prefix + ".queuesize.count"));
metrics.add(new Metric(prefix + ".maxpending.last"));
metrics.add(new Metric(prefix + ".accepted.rate"));
metrics.add(new Metric(prefix + ".wakeups.rate"));
metrics.add(new Metric(prefix + ".utilization.max"));
metrics.add(new Metric(prefix + ".utilization.sum"));
metrics.add(new Metric(prefix + ".utilization.count"));
}
    private static Set<Metric> getSearchNodeMetrics() {
        // Metrics reported by the proton search node, grouped by subsystem below.
        // The LinkedHashSet preserves the declaration order of the metric names.
        Set<Metric> metrics = new LinkedHashSet<>();
        // Document counts and per-documentdb totals.
        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        // Query/docsum transport counts and latencies.
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"));
        metrics.add(new Metric("content.proton.docsum.latency.max"));
        metrics.add(new Metric("content.proton.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.docsum.latency.count"));
        metrics.add(new Metric("content.proton.docsum.latency.average"));
        metrics.add(new Metric("content.proton.transport.query.latency.max"));
        metrics.add(new Metric("content.proton.transport.query.latency.sum"));
        metrics.add(new Metric("content.proton.transport.query.latency.count"));
        metrics.add(new Metric("content.proton.transport.query.latency.average"));
        // Search protocol (query and docsum) latency and message sizes.
        metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
        // Per-executor metrics for proton's internal executors.
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
        // Background maintenance job load.
        metrics.add(new Metric("content.proton.documentdb.job.total.average"));
        metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
        metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
        // Per-executor metrics for the documentdb threading services.
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
        // Local document id (lid) space metrics for the ready/notready/removed sub-dbs.
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
        // Node-level resource usage (disk, memory, file descriptors, feed blocking).
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
        // CPU utilization broken down by work category.
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.count"));
        // Transaction log.
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
        // Document store usage for the ready/notready/removed sub-dbs.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
        // Document store cache.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
        // Attribute vector memory usage.
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
        // Index memory usage.
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
        // Matching metrics, aggregated over all rank profiles.
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
        // Matching metrics, broken down per rank profile.
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
        // Feeding (commit) metrics.
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
        return metrics;
    }
/**
 * Metrics reported by the storage (content) node: stored-document gauges, visitor
 * thread statistics, and filestor operation counts/latencies plus persistence
 * throttle gauges. Returned as a mutable, insertion-ordered set.
 * NOTE(review): several names (e.g. "vds.filestor.alldisks.allthreads.put.sum.count.rate",
 * added both early and in the per-operation group below) are added twice; whether the
 * second add is a no-op depends on Metric's equals/hashCode — TODO confirm.
 */
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
// Stored-document totals across all disks.
metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));
// Visitor lifetime / queueing / throughput, summed over all visitor threads.
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average"));
// Per-operation rates over all filestor threads.
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));
// Filestor queueing and active-operation gauges.
metrics.add(new Metric("vds.filestor.alldisks.queuesize.max"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.count"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.average"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
// Persistence throttle state.
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.count"));
// Merge read/write latencies.
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count"));
// Stripe-level throttling events.
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));
// Detailed per-operation counts, failures, latencies and request sizes.
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average"));
// Bucket maintenance operations.
metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
return metrics;
}
/**
 * Adds one metric per aggregate suffix, named {@code metricName + "." + suffix}.
 */
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
    aggregateSuffices.forEach(suffix -> metrics.add(new Metric(metricName + "." + suffix)));
}
} | class VespaMetricSet {
// The complete "vespa" metric set: every metric from getVespaMetrics(), layered on
// top of defaultVespaMetricSet as its single child set.
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
/**
 * Collects every per-service metric group into one unmodifiable,
 * insertion-ordered set. Group order matches the original registration order.
 */
private static Set<Metric> getVespaMetrics() {
    Set<Metric> all = new LinkedHashSet<>();
    for (Set<Metric> group : List.of(getSearchNodeMetrics(),
                                     getStorageMetrics(),
                                     getDistributorMetrics(),
                                     getDocprocMetrics(),
                                     getClusterControllerMetrics(),
                                     getQrserverMetrics(),
                                     getContainerMetrics(),
                                     getConfigServerMetrics(),
                                     getSentinelMetrics(),
                                     getOtherMetrics())) {
        all.addAll(group);
    }
    return Collections.unmodifiableSet(all);
}
/** Metrics reported by the config sentinel (service restarts and uptime). */
private static Set<Metric> getSentinelMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : new String[] {
            "sentinel.restarts.count",
            "sentinel.totalRestarts.last",
            "sentinel.uptime.last",
            "sentinel.running.count",
            "sentinel.running.last" }) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Miscellaneous metrics with no dedicated service group: slobrok, logd,
 * certificate expiry, and TLS connection/handshake counters for jrt and vds.
 */
private static Set<Metric> getOtherMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : new String[] {
            "slobrok.heartbeats.failed.count",
            "logd.processed.lines.count",
            "worker.connections.max",
            "endpoint.certificate.expiry.seconds",
            // jrt transport TLS counters.
            "jrt.transport.tls-certificate-verification-failures",
            "jrt.transport.peer-authorization-failures",
            "jrt.transport.server.tls-connections-established",
            "jrt.transport.client.tls-connections-established",
            "jrt.transport.server.unencrypted-connections-established",
            "jrt.transport.client.unencrypted-connections-established",
            // vds server network TLS counters.
            "vds.server.network.tls-handshakes-failed",
            "vds.server.network.peer-authorization-failures",
            "vds.server.network.client.tls-connections-established",
            "vds.server.network.server.tls-connections-established",
            "vds.server.network.client.insecure-connections-established",
            "vds.server.network.server.insecure-connections-established",
            "vds.server.network.tls-connections-broken",
            "vds.server.network.failed-tls-config-reloads",
            "vds.server.fnet.num-connections",
            "node-certificate.expiry.seconds" }) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Metrics reported by the config server: request counts/latencies, cache and
 * host gauges, and ZooKeeper state.
 */
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : new String[] {
            "configserver.requests.count",
            "configserver.failedRequests.count",
            "configserver.latency.max",
            "configserver.latency.sum",
            "configserver.latency.count",
            "configserver.latency.average",
            "configserver.cacheConfigElems.last",
            "configserver.cacheChecksumElems.last",
            "configserver.hosts.last",
            "configserver.delayedResponses.count",
            "configserver.sessionChangeErrors.count",
            // Embedded ZooKeeper health.
            "configserver.zkZNodes.last",
            "configserver.zkAvgLatency.last",
            "configserver.zkMaxLatency.last",
            "configserver.zkConnections.last",
            "configserver.zkOutstandingRequests.last" }) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Metrics reported by the jdisc container: HTTP server/request statistics,
 * thread pools, document/v1 HTTP API counters, JVM memory and GC, status-code
 * rates, TLS handshake failures and request filtering. Returned as a mutable,
 * insertion-ordered set.
 */
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
// Generic handler throughput and latency.
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
// Server thread-pool and connection gauges.
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
// jdisc/Jetty thread pools share one suffix list; the block scope keeps it local.
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.rejected_tasks", suffixes);
addMetric(metrics, "jdisc.thread_pool.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.max_allowed_size", suffixes);
addMetric(metrics, "jdisc.thread_pool.active_threads", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.min", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", suffixes);
}
// Document HTTP API latency and operation counters.
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
// JVM heap / direct / native memory gauges.
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("mem.direct.total.average"));
metrics.add(new Metric("mem.direct.free.average"));
metrics.add(new Metric("mem.direct.used.average"));
metrics.add(new Metric("mem.direct.used.max"));
metrics.add(new Metric("mem.direct.count.max"));
metrics.add(new Metric("mem.native.total.average"));
metrics.add(new Metric("mem.native.free.average"));
metrics.add(new Metric("mem.native.used.average"));
metrics.add(new Metric("mem.native.used.max"));
// Garbage-collection counters and container deactivation.
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
// HTTP request shape and status-code rates.
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
// TLS handshake failure breakdown.
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
// Request/response filter chains and filter rules.
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
/**
 * Metrics reported by the cluster controller: node-state counts, tick timing,
 * mastership, resource-usage feed block gauges and reindexing progress.
 */
private static Set<Metric> getClusterControllerMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : new String[] {
            // Number of content nodes in each state.
            "cluster-controller.down.count.last",
            "cluster-controller.initializing.count.last",
            "cluster-controller.maintenance.count.last",
            "cluster-controller.retired.count.last",
            "cluster-controller.stopping.count.last",
            "cluster-controller.up.count.last",
            "cluster-controller.cluster-state-change.count",
            // Controller tick timing.
            "cluster-controller.busy-tick-time-ms.last",
            "cluster-controller.busy-tick-time-ms.max",
            "cluster-controller.busy-tick-time-ms.sum",
            "cluster-controller.busy-tick-time-ms.count",
            "cluster-controller.idle-tick-time-ms.last",
            "cluster-controller.idle-tick-time-ms.max",
            "cluster-controller.idle-tick-time-ms.sum",
            "cluster-controller.idle-tick-time-ms.count",
            "cluster-controller.work-ms.last",
            "cluster-controller.work-ms.sum",
            "cluster-controller.work-ms.count",
            "cluster-controller.is-master.last",
            "cluster-controller.remote-task-queue.size.last",
            "cluster-controller.node-event.count",
            // Resource usage relative to configured feed-block limits.
            "cluster-controller.resource_usage.nodes_above_limit.last",
            "cluster-controller.resource_usage.nodes_above_limit.max",
            "cluster-controller.resource_usage.max_memory_utilization.last",
            "cluster-controller.resource_usage.max_memory_utilization.max",
            "cluster-controller.resource_usage.max_disk_utilization.last",
            "cluster-controller.resource_usage.max_disk_utilization.max",
            "cluster-controller.resource_usage.disk_limit.last",
            "cluster-controller.resource_usage.memory_limit.last",
            "reindexing.progress.last" }) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/** Metrics reported by document processing (docproc): processed-document rate. */
private static Set<Metric> getDocprocMetrics() {
    Set<Metric> docprocMetrics = new LinkedHashSet<>();
    docprocMetrics.add(new Metric("documents_processed.rate"));
    return docprocMetrics;
}
/**
 * Metrics reported by the query/result server (search container): QPS and
 * connection gauges, query/feed latency distributions, hit counts, relevance
 * samples and per-category query error rates. Returned as a mutable,
 * insertion-ordered set.
 */
private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max"));
metrics.add(new Metric("search_connections.max"));
metrics.add(new Metric("search_connections.sum"));
metrics.add(new Metric("search_connections.count"));
metrics.add(new Metric("search_connections.average"));
// Feed latency and HTTP feed throughput.
metrics.add(new Metric("feed.latency.max"));
metrics.add(new Metric("feed.latency.sum"));
metrics.add(new Metric("feed.latency.count"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("feed.http-requests.count"));
metrics.add(new Metric("feed.http-requests.rate"));
// Query rate and latency, including percentiles.
metrics.add(new Metric("queries.rate"));
metrics.add(new Metric("query_container_latency.max"));
metrics.add(new Metric("query_container_latency.sum"));
metrics.add(new Metric("query_container_latency.count"));
metrics.add(new Metric("query_container_latency.average"));
metrics.add(new Metric("query_latency.max"));
metrics.add(new Metric("query_latency.sum"));
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.average"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
// Hit counts and coverage.
metrics.add(new Metric("hits_per_query.max"));
metrics.add(new Metric("hits_per_query.sum"));
metrics.add(new Metric("hits_per_query.count"));
metrics.add(new Metric("hits_per_query.average"));
metrics.add(new Metric("hits_per_query.95percentile"));
metrics.add(new Metric("hits_per_query.99percentile"));
metrics.add(new Metric("query_hit_offset.max"));
metrics.add(new Metric("query_hit_offset.sum"));
metrics.add(new Metric("query_hit_offset.count"));
metrics.add(new Metric("documents_covered.count"));
metrics.add(new Metric("documents_total.count"));
metrics.add(new Metric("dispatch_internal.rate"));
metrics.add(new Metric("dispatch_fdispatch.rate"));
addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
addMetric(metrics, "query_item_count", Set.of("max", "sum", "count"));
metrics.add(new Metric("totalhits_per_query.max"));
metrics.add(new Metric("totalhits_per_query.sum"));
metrics.add(new Metric("totalhits_per_query.count"));
metrics.add(new Metric("totalhits_per_query.average"));
metrics.add(new Metric("totalhits_per_query.95percentile"));
metrics.add(new Metric("totalhits_per_query.99percentile"));
metrics.add(new Metric("empty_results.rate"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
// Relevance score samples at result positions 1, 3 and 10.
metrics.add(new Metric("relevance.at_1.sum"));
metrics.add(new Metric("relevance.at_1.count"));
metrics.add(new Metric("relevance.at_1.average"));
metrics.add(new Metric("relevance.at_3.sum"));
metrics.add(new Metric("relevance.at_3.count"));
metrics.add(new Metric("relevance.at_3.average"));
metrics.add(new Metric("relevance.at_10.sum"));
metrics.add(new Metric("relevance.at_10.count"));
metrics.add(new Metric("relevance.at_10.average"));
// Per-category query error rates.
metrics.add(new Metric("error.timeout.rate"));
metrics.add(new Metric("error.backends_oos.rate"));
metrics.add(new Metric("error.plugin_failure.rate"));
metrics.add(new Metric("error.backend_communication_error.rate"));
metrics.add(new Metric("error.empty_document_summaries.rate"));
metrics.add(new Metric("error.invalid_query_parameter.rate"));
metrics.add(new Metric("error.internal_server_error.rate"));
metrics.add(new Metric("error.misconfigured_server.rate"));
metrics.add(new Metric("error.invalid_query_transformation.rate"));
metrics.add(new Metric("error.result_with_errors.rate"));
metrics.add(new Metric("error.unspecified.rate"));
metrics.add(new Metric("error.unhandled_exception.rate"));
return metrics;
}
/**
 * Adds the standard executor metrics (queue size, pending, throughput and
 * utilization) under the given search-node executor prefix.
 */
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
    for (String suffix : new String[] {
            ".queuesize.max", ".queuesize.sum", ".queuesize.count",
            ".maxpending.last", ".accepted.rate", ".wakeups.rate",
            ".utilization.max", ".utilization.sum", ".utilization.count" }) {
        metrics.add(new Metric(prefix + suffix));
    }
}
    /**
     * Returns the default metrics for the search node (proton) process: document
     * counts, query/docsum latencies, executor utilization, maintenance jobs,
     * lid-space accounting, resource usage, document store, attribute/index memory,
     * matching and feeding metrics. Insertion order is preserved (LinkedHashSet).
     */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        // Document counts and document db size
        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        // Query and docsum transport latencies
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"));
        metrics.add(new Metric("content.proton.docsum.latency.max"));
        metrics.add(new Metric("content.proton.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.docsum.latency.count"));
        metrics.add(new Metric("content.proton.docsum.latency.average"));
        metrics.add(new Metric("content.proton.transport.query.latency.max"));
        metrics.add(new Metric("content.proton.transport.query.latency.sum"));
        metrics.add(new Metric("content.proton.transport.query.latency.count"));
        metrics.add(new Metric("content.proton.transport.query.latency.average"));
        // Search protocol request/reply sizes and latencies
        metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
        // Proton-level executors
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
        // Maintenance job load
        metrics.add(new Metric("content.proton.documentdb.job.total.average"));
        metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
        metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
        // Per-documentdb threading services
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
        // Lid space usage for the ready/notready/removed sub-dbs
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
        // Node-level resource usage (disk, memory, cpu, fd, feed blocking)
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
        metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.count"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.max"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.sum"));
        metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.count"));
        // Transaction log
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
        // Document store usage and cache, per sub-db
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
        // Attribute and index memory usage
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
        // Matching (overall and per rank profile)
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
        // Feeding (commit operations and latency)
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
        return metrics;
    }
    /**
     * Returns the default metrics for the storage (distributor/content) service:
     * stored-document totals, visitor, filestor queue/throttling, merge, and
     * per-operation (put/remove/get/update/visit) counts and latencies.
     * Insertion order is preserved (LinkedHashSet).
     */
    private static Set<Metric> getStorageMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        // Stored data volume
        metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
        metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));
        // Visitor lifecycle and queueing
        metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max"));
        metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum"));
        metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count"));
        metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average"));
        metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max"));
        metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum"));
        metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count"));
        metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average"));
        metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max"));
        metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum"));
        metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count"));
        metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
        metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
        metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
        metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
        metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
        metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max"));
        metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum"));
        metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count"));
        metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average"));
        metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max"));
        metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum"));
        metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count"));
        metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average"));
        // Filestor operation rates
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));
        // Filestor queueing, active operations and throttling
        metrics.add(new Metric("vds.filestor.alldisks.queuesize.max"));
        metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.queuesize.count"));
        metrics.add(new Metric("vds.filestor.alldisks.queuesize.average"));
        metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max"));
        metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
        metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.max"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.count"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.max"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.count"));
        // Merge read/write latencies
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));
        // Per-operation counts, failures, latency and request sizes
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average"));
        // Bucket maintenance operations
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average"));
        metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
        return metrics;
    }
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
for (String suffix : aggregateSuffices) {
metrics.add(new Metric(metricName + "." + suffix));
}
}
} | |
Done | private static Set<Metric> getDistributorMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
} | private static Set<Metric> getDistributorMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
} | class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDistributorMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getSentinelMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
private static Set<Metric> getSentinelMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("sentinel.restarts.count"));
metrics.add(new Metric("sentinel.totalRestarts.last"));
metrics.add(new Metric("sentinel.uptime.last"));
metrics.add(new Metric("sentinel.running.count"));
metrics.add(new Metric("sentinel.running.last"));
return metrics;
}
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count"));
metrics.add(new Metric("logd.processed.lines.count"));
metrics.add(new Metric("worker.connections.max"));
metrics.add(new Metric("endpoint.certificate.expiry.seconds"));
metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));
metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.tls-connections-broken"));
metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
metrics.add(new Metric("vds.server.fnet.num-connections"));
metrics.add(new Metric("node-certificate.expiry.seconds"));
return metrics;
}
private static Set<Metric> getConfigServerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count"));
metrics.add(new Metric("configserver.failedRequests.count"));
metrics.add(new Metric("configserver.latency.max"));
metrics.add(new Metric("configserver.latency.sum"));
metrics.add(new Metric("configserver.latency.count"));
metrics.add(new Metric("configserver.latency.average"));
metrics.add(new Metric("configserver.cacheConfigElems.last"));
metrics.add(new Metric("configserver.cacheChecksumElems.last"));
metrics.add(new Metric("configserver.hosts.last"));
metrics.add(new Metric("configserver.delayedResponses.count"));
metrics.add(new Metric("configserver.sessionChangeErrors.count"));
metrics.add(new Metric("configserver.zkZNodes.last"));
metrics.add(new Metric("configserver.zkAvgLatency.last"));
metrics.add(new Metric("configserver.zkMaxLatency.last"));
metrics.add(new Metric("configserver.zkConnections.last"));
metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
return metrics;
}
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.rejected_tasks", suffixes);
addMetric(metrics, "jdisc.thread_pool.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.max_allowed_size", suffixes);
addMetric(metrics, "jdisc.thread_pool.active_threads", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.min", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", suffixes);
}
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("mem.direct.total.average"));
metrics.add(new Metric("mem.direct.free.average"));
metrics.add(new Metric("mem.direct.used.average"));
metrics.add(new Metric("mem.direct.used.max"));
metrics.add(new Metric("mem.direct.count.max"));
metrics.add(new Metric("mem.native.total.average"));
metrics.add(new Metric("mem.native.free.average"));
metrics.add(new Metric("mem.native.used.average"));
metrics.add(new Metric("mem.native.used.max"));
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.work-ms.last"));
metrics.add(new Metric("cluster-controller.work-ms.sum"));
metrics.add(new Metric("cluster-controller.work-ms.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
metrics.add(new Metric("reindexing.progress.last"));
return metrics;
}
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate"));
return metrics;
}
private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max"));
metrics.add(new Metric("search_connections.max"));
metrics.add(new Metric("search_connections.sum"));
metrics.add(new Metric("search_connections.count"));
metrics.add(new Metric("search_connections.average"));
metrics.add(new Metric("feed.latency.max"));
metrics.add(new Metric("feed.latency.sum"));
metrics.add(new Metric("feed.latency.count"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("feed.http-requests.count"));
metrics.add(new Metric("feed.http-requests.rate"));
metrics.add(new Metric("queries.rate"));
metrics.add(new Metric("query_container_latency.max"));
metrics.add(new Metric("query_container_latency.sum"));
metrics.add(new Metric("query_container_latency.count"));
metrics.add(new Metric("query_container_latency.average"));
metrics.add(new Metric("query_latency.max"));
metrics.add(new Metric("query_latency.sum"));
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.average"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
metrics.add(new Metric("hits_per_query.max"));
metrics.add(new Metric("hits_per_query.sum"));
metrics.add(new Metric("hits_per_query.count"));
metrics.add(new Metric("hits_per_query.average"));
metrics.add(new Metric("hits_per_query.95percentile"));
metrics.add(new Metric("hits_per_query.99percentile"));
metrics.add(new Metric("query_hit_offset.max"));
metrics.add(new Metric("query_hit_offset.sum"));
metrics.add(new Metric("query_hit_offset.count"));
metrics.add(new Metric("documents_covered.count"));
metrics.add(new Metric("documents_total.count"));
metrics.add(new Metric("dispatch_internal.rate"));
metrics.add(new Metric("dispatch_fdispatch.rate"));
addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
addMetric(metrics, "query_item_count", Set.of("max", "sum", "count"));
metrics.add(new Metric("totalhits_per_query.max"));
metrics.add(new Metric("totalhits_per_query.sum"));
metrics.add(new Metric("totalhits_per_query.count"));
metrics.add(new Metric("totalhits_per_query.average"));
metrics.add(new Metric("totalhits_per_query.95percentile"));
metrics.add(new Metric("totalhits_per_query.99percentile"));
metrics.add(new Metric("empty_results.rate"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("relevance.at_1.sum"));
metrics.add(new Metric("relevance.at_1.count"));
metrics.add(new Metric("relevance.at_1.average"));
metrics.add(new Metric("relevance.at_3.sum"));
metrics.add(new Metric("relevance.at_3.count"));
metrics.add(new Metric("relevance.at_3.average"));
metrics.add(new Metric("relevance.at_10.sum"));
metrics.add(new Metric("relevance.at_10.count"));
metrics.add(new Metric("relevance.at_10.average"));
metrics.add(new Metric("error.timeout.rate"));
metrics.add(new Metric("error.backends_oos.rate"));
metrics.add(new Metric("error.plugin_failure.rate"));
metrics.add(new Metric("error.backend_communication_error.rate"));
metrics.add(new Metric("error.empty_document_summaries.rate"));
metrics.add(new Metric("error.invalid_query_parameter.rate"));
metrics.add(new Metric("error.internal_server_error.rate"));
metrics.add(new Metric("error.misconfigured_server.rate"));
metrics.add(new Metric("error.invalid_query_transformation.rate"));
metrics.add(new Metric("error.result_with_errors.rate"));
metrics.add(new Metric("error.unspecified.rate"));
metrics.add(new Metric("error.unhandled_exception.rate"));
return metrics;
}
/**
 * Adds the standard executor (thread pool) metrics for the given metric name prefix:
 * queue size (max/sum/count), last max-pending, accepted and wakeup rates, and
 * utilization (max/sum/count).
 *
 * @param metrics the set to add to (insertion order is preserved by the callers' LinkedHashSet)
 * @param prefix  the metric name prefix, e.g. "content.proton.executor.match"
 */
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
    addMetric(metrics, prefix + ".queuesize", List.of("max", "sum", "count"));
    metrics.add(new Metric(prefix + ".maxpending.last"));
    metrics.add(new Metric(prefix + ".accepted.rate"));
    metrics.add(new Metric(prefix + ".wakeups.rate"));
    addMetric(metrics, prefix + ".utilization", List.of("max", "sum", "count"));
}
/**
 * Metrics reported by the search node (proton): document counts, protocol latencies
 * and payload sizes, executors, maintenance jobs, lid space, resource usage,
 * document stores, attributes, matching and feeding.
 *
 * Repeated suffix groups are emitted through {@link #addMetric} and order-preserving
 * {@code List.of} loops, so the LinkedHashSet insertion order matches the previous
 * hand-written list exactly.
 *
 * @return the search node metrics, in deterministic insertion order
 */
private static Set<Metric> getSearchNodeMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();

    metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
    metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
    metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
    metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
    metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
    metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
    metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
    metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
    metrics.add(new Metric("content.proton.transport.query.count.rate"));
    metrics.add(new Metric("content.proton.docsum.docs.rate"));
    addMetric(metrics, "content.proton.docsum.latency", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "content.proton.transport.query.latency", List.of("max", "sum", "count", "average"));

    // Search protocol (query and docsum): latency and payload sizes.
    addMetric(metrics, "content.proton.search_protocol.query.latency", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.search_protocol.query.request_size", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.search_protocol.query.reply_size", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.search_protocol.docsum.latency", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.search_protocol.docsum.request_size", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.search_protocol.docsum.reply_size", List.of("max", "sum", "count"));
    metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));

    // Proton-level executors.
    for (String executor : List.of("proton", "flush", "match", "docsum", "shared", "warmup", "field_writer")) {
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor." + executor);
    }

    // Maintenance job load.
    for (String job : List.of("total", "attribute_flush", "memory_index_flush", "disk_index_fusion",
                              "document_store_flush", "document_store_compact", "bucket_move",
                              "lid_space_compact", "removed_documents_prune")) {
        metrics.add(new Metric("content.proton.documentdb.job." + job + ".average"));
    }

    // Document db threading services.
    for (String service : List.of("master", "index", "summary", "index_field_inverter",
                                  "index_field_writer", "attribute_field_writer")) {
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service." + service);
    }

    // Lid space metrics, grouped per metric (then per sub db) as in the original list.
    for (String lidMetric : List.of("lid_bloat_factor.average", "lid_fragmentation_factor.average",
                                    "lid_limit.last", "highest_used_lid.last", "used_lids.last")) {
        for (String subDb : List.of("ready", "notready", "removed")) {
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space." + lidMetric));
        }
    }
    metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));

    // Node-level resource usage.
    metrics.add(new Metric("content.proton.resource_usage.disk.average"));
    metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
    metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
    metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
    metrics.add(new Metric("content.proton.resource_usage.memory.average"));
    metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
    metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
    metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
    metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
    metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
    metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
    metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
    metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
    metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
    metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
    metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
    metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
    metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
    metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));

    // CPU utilization per work category.
    for (String category : List.of("setup", "read", "write", "compact", "other")) {
        addMetric(metrics, "content.proton.resource_usage.cpu_util." + category, List.of("max", "sum", "count"));
    }

    // Transaction log.
    metrics.add(new Metric("content.proton.transactionlog.entries.average"));
    metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
    metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));

    // Document store, per sub db.
    for (String subDb : List.of("ready", "notready", "removed")) {
        String prefix = "content.proton.documentdb." + subDb + ".document_store.";
        for (String name : List.of("disk_usage", "disk_bloat", "max_bucket_spread",
                                   "memory_usage.allocated_bytes", "memory_usage.used_bytes",
                                   "memory_usage.dead_bytes", "memory_usage.onhold_bytes")) {
            metrics.add(new Metric(prefix + name + ".average"));
        }
    }

    // Document store cache — the original list has no cache metrics for 'removed'.
    for (String subDb : List.of("ready", "notready")) {
        String prefix = "content.proton.documentdb." + subDb + ".document_store.cache.";
        metrics.add(new Metric(prefix + "memory_usage.average"));
        metrics.add(new Metric(prefix + "hit_rate.average"));
        metrics.add(new Metric(prefix + "lookups.rate"));
        metrics.add(new Metric(prefix + "invalidations.rate"));
    }

    // Attribute memory usage — the original list has no attribute metrics for 'removed'.
    for (String subDb : List.of("ready", "notready")) {
        for (String kind : List.of("allocated_bytes", "used_bytes", "dead_bytes", "onhold_bytes")) {
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".attribute.memory_usage." + kind + ".average"));
        }
    }

    // Index memory usage.
    for (String kind : List.of("allocated_bytes", "used_bytes", "dead_bytes", "onhold_bytes")) {
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage." + kind + ".average"));
    }

    // Matching, all rank profiles combined.
    metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
    metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
    addMetric(metrics, "content.proton.documentdb.matching.query_latency", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "content.proton.documentdb.matching.query_collateral_time", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "content.proton.documentdb.matching.query_setup_time", List.of("max", "sum", "count"));
    metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
    metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));

    // Matching, per rank profile.
    metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
    metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.soft_doom_factor", List.of("min", "max", "sum", "count"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_latency", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_collateral_time", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_setup_time", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.grouping_time", List.of("max", "sum", "count"));
    addMetric(metrics, "content.proton.documentdb.matching.rank_profile.rerank_time", List.of("max", "sum", "count", "average"));
    metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
    metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
    metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));

    // Feeding.
    addMetric(metrics, "content.proton.documentdb.feeding.commit.operations", List.of("max", "sum", "count", "rate"));
    addMetric(metrics, "content.proton.documentdb.feeding.commit.latency", List.of("max", "sum", "count"));

    return metrics;
}
/**
 * Metrics reported by the storage node (vds): stored data, visitor pipeline,
 * filestor operation throughput/latency, queueing, throttling and bucket maintenance.
 *
 * Repeated suffix groups are emitted through {@link #addMetric} and the per-operation
 * detail metrics through one order-preserving loop, so the LinkedHashSet insertion
 * order matches the previous hand-written list exactly.
 *
 * @return the storage node metrics, in deterministic insertion order
 */
private static Set<Metric> getStorageMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();

    metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
    metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));

    // Visitor pipeline.
    addMetric(metrics, "vds.visitor.allthreads.averagevisitorlifetime.sum", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.visitor.allthreads.averagequeuewait.sum", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.visitor.allthreads.queuesize.count", List.of("max", "sum", "count", "average"));
    metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
    metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
    metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
    metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
    addMetric(metrics, "vds.visitor.allthreads.averagemessagesendtime.sum", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.visitor.allthreads.averageprocessingtime.sum", List.of("max", "sum", "count", "average"));

    // Operation throughput.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));

    // Queueing, active operations and throttling.
    addMetric(metrics, "vds.filestor.alldisks.queuesize", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.filestor.alldisks.averagequeuewait.sum", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.filestor.alldisks.active_operations.size", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.active_operations.latency", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.throttle_window_size", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.throttle_waiting_threads", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.throttle_active_tokens", List.of("max", "sum", "count"));

    // Merge and basic operation latencies.
    addMetric(metrics, "vds.filestor.alldisks.allthreads.mergemetadatareadlatency", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.mergedatareadlatency", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.mergedatawritelatency", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.put_latency", List.of("max", "sum", "count"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.remove_latency", List.of("max", "sum", "count"));

    metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));

    // Per-operation detail metrics; the original list has no test_and_set_failed for 'get'.
    for (String op : List.of("put", "remove", "get", "update")) {
        String prefix = "vds.filestor.alldisks.allthreads." + op + ".sum.";
        metrics.add(new Metric(prefix + "count.rate"));
        metrics.add(new Metric(prefix + "failed.rate"));
        if (!op.equals("get")) {
            metrics.add(new Metric(prefix + "test_and_set_failed.rate"));
        }
        addMetric(metrics, prefix + "latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, prefix + "request_size", List.of("max", "sum", "count"));
    }

    addMetric(metrics, "vds.filestor.alldisks.allthreads.createiterator.latency", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.visit.sum.latency", List.of("max", "sum", "count", "average"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.remove_location.sum.latency", List.of("max", "sum", "count", "average"));

    // Bucket maintenance.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
    addMetric(metrics, "vds.filestor.alldisks.allthreads.deletebuckets.latency", List.of("max", "sum", "count", "average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));

    return metrics;
}
/**
 * Adds one metric per aggregate suffix, named {@code <metricName>.<suffix>},
 * in the iteration order of the given suffixes.
 *
 * @param metrics           the set to add to
 * @param metricName        the metric name without aggregate suffix
 * @param aggregateSuffices the aggregate suffixes, e.g. "max", "sum", "count"
 */
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
    aggregateSuffices.forEach(suffix -> metrics.add(new Metric(metricName + "." + suffix)));
}
}
class VespaMetricSet {
// The complete "vespa" metric set: everything from getVespaMetrics() plus the
// default Vespa metric set as a child set (presumably so consumers of the default
// set also receive these — TODO confirm MetricSet child semantics).
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
/**
 * Aggregates the metrics of every service type into one unmodifiable set.
 * Insertion order (search node first, "other" last) is preserved, matching
 * the original sequence of addAll calls.
 *
 * @return an unmodifiable, insertion-ordered set of all Vespa metrics
 */
private static Set<Metric> getVespaMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (Set<Metric> serviceMetrics : List.of(getSearchNodeMetrics(),
                                              getStorageMetrics(),
                                              getDistributorMetrics(),
                                              getDocprocMetrics(),
                                              getClusterControllerMetrics(),
                                              getQrserverMetrics(),
                                              getContainerMetrics(),
                                              getConfigServerMetrics(),
                                              getSentinelMetrics(),
                                              getOtherMetrics())) {
        metrics.addAll(serviceMetrics);
    }
    return Collections.unmodifiableSet(metrics);
}
/**
 * Metrics reported by the config sentinel (service restarts, uptime, running state).
 *
 * @return the sentinel metrics, in deterministic insertion order
 */
private static Set<Metric> getSentinelMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : List.of("sentinel.restarts.count",
                               "sentinel.totalRestarts.last",
                               "sentinel.uptime.last",
                               "sentinel.running.count",
                               "sentinel.running.last")) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Miscellaneous metrics that do not belong to one service type: slobrok, logd,
 * certificate expiry, and TLS/connection counters for jrt and vds transports.
 *
 * @return the miscellaneous metrics, in deterministic insertion order
 */
private static Set<Metric> getOtherMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : List.of("slobrok.heartbeats.failed.count",
                               "logd.processed.lines.count",
                               "worker.connections.max",
                               "endpoint.certificate.expiry.seconds",
                               "jrt.transport.tls-certificate-verification-failures",
                               "jrt.transport.peer-authorization-failures",
                               "jrt.transport.server.tls-connections-established",
                               "jrt.transport.client.tls-connections-established",
                               "jrt.transport.server.unencrypted-connections-established",
                               "jrt.transport.client.unencrypted-connections-established",
                               "vds.server.network.tls-handshakes-failed",
                               "vds.server.network.peer-authorization-failures",
                               "vds.server.network.client.tls-connections-established",
                               "vds.server.network.server.tls-connections-established",
                               "vds.server.network.client.insecure-connections-established",
                               "vds.server.network.server.insecure-connections-established",
                               "vds.server.network.tls-connections-broken",
                               "vds.server.network.failed-tls-config-reloads",
                               "vds.server.fnet.num-connections",
                               "node-certificate.expiry.seconds")) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Config server metrics: request counts/latency, config cache sizes,
 * and ZooKeeper health.
 */
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    // Ordered list keeps the LinkedHashSet insertion order deterministic.
    for (String name : List.of(
            "configserver.requests.count",
            "configserver.failedRequests.count",
            "configserver.latency.max",
            "configserver.latency.sum",
            "configserver.latency.count",
            "configserver.latency.average",
            "configserver.cacheConfigElems.last",
            "configserver.cacheChecksumElems.last",
            "configserver.hosts.last",
            "configserver.delayedResponses.count",
            "configserver.sessionChangeErrors.count",
            "configserver.zkZNodes.last",
            "configserver.zkAvgLatency.last",
            "configserver.zkMaxLatency.last",
            "configserver.zkConnections.last",
            "configserver.zkOutstandingRequests.last")) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
// Metrics common to all jdisc container clusters: HTTP server, thread pools,
// feed API (httpapi_*), JVM memory/GC, per-status-code rates, TLS handshake
// failures, and request filtering. NOTE(review): kept byte-identical on
// purpose — insertion order into the LinkedHashSet is the emission order.
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
// HTTP request handling (jdisc server).
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
// Connection and byte counters for the HTTP server.
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
// Thread-pool metrics: same suffix set applied to each pool metric,
// for both the jdisc thread pools and the Jetty pool.
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.rejected_tasks", suffixes);
addMetric(metrics, "jdisc.thread_pool.size", suffixes);
addMetric(metrics, "jdisc.thread_pool.max_allowed_size", suffixes);
addMetric(metrics, "jdisc.thread_pool.active_threads", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.min", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", suffixes);
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", suffixes);
}
// Document/feed HTTP API (httpapi_*) latency and operation rates.
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
// JVM memory: heap, direct buffers, native, plus process-level handles.
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("mem.direct.total.average"));
metrics.add(new Metric("mem.direct.free.average"));
metrics.add(new Metric("mem.direct.used.average"));
metrics.add(new Metric("mem.direct.used.max"));
metrics.add(new Metric("mem.direct.count.max"));
metrics.add(new Metric("mem.native.total.average"));
metrics.add(new Metric("mem.native.free.average"));
metrics.add(new Metric("mem.native.used.average"));
metrics.add(new Metric("mem.native.used.max"));
// Garbage collection and container deactivation.
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
// Credential expiry and request hygiene.
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
// HTTP response status classes and selected auth failures.
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
// TLS handshake failure breakdown by cause.
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
// Request/response filter chains and filter rules.
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
/**
 * Cluster controller metrics: node state counts, tick/work timing,
 * mastership, and content cluster resource usage limits.
 */
private static Set<Metric> getClusterControllerMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    // Ordered list keeps the LinkedHashSet insertion order deterministic.
    for (String name : List.of(
            "cluster-controller.down.count.last",
            "cluster-controller.initializing.count.last",
            "cluster-controller.maintenance.count.last",
            "cluster-controller.retired.count.last",
            "cluster-controller.stopping.count.last",
            "cluster-controller.up.count.last",
            "cluster-controller.cluster-state-change.count",
            "cluster-controller.busy-tick-time-ms.last",
            "cluster-controller.busy-tick-time-ms.max",
            "cluster-controller.busy-tick-time-ms.sum",
            "cluster-controller.busy-tick-time-ms.count",
            "cluster-controller.idle-tick-time-ms.last",
            "cluster-controller.idle-tick-time-ms.max",
            "cluster-controller.idle-tick-time-ms.sum",
            "cluster-controller.idle-tick-time-ms.count",
            "cluster-controller.work-ms.last",
            "cluster-controller.work-ms.sum",
            "cluster-controller.work-ms.count",
            "cluster-controller.is-master.last",
            "cluster-controller.remote-task-queue.size.last",
            "cluster-controller.node-event.count",
            "cluster-controller.resource_usage.nodes_above_limit.last",
            "cluster-controller.resource_usage.nodes_above_limit.max",
            "cluster-controller.resource_usage.max_memory_utilization.last",
            "cluster-controller.resource_usage.max_memory_utilization.max",
            "cluster-controller.resource_usage.max_disk_utilization.last",
            "cluster-controller.resource_usage.max_disk_utilization.max",
            "cluster-controller.resource_usage.disk_limit.last",
            "cluster-controller.resource_usage.memory_limit.last",
            "reindexing.progress.last")) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/** Document processing (docproc) metrics: processing throughput only. */
private static Set<Metric> getDocprocMetrics() {
    // Returned set is mutable (LinkedHashSet), matching the other builders.
    return new LinkedHashSet<>(List.of(new Metric("documents_processed.rate")));
}
/**
 * Search container (qrserver) metrics: query/feed rates and latencies,
 * hit counts, relevance samples, and per-category error rates.
 */
private static Set<Metric> getQrserverMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    metrics.add(new Metric("peak_qps.max"));
    metrics.add(new Metric("search_connections.max"));
    metrics.add(new Metric("search_connections.sum"));
    metrics.add(new Metric("search_connections.count"));
    metrics.add(new Metric("search_connections.average"));
    // Feed path through the container.
    metrics.add(new Metric("feed.latency.max"));
    metrics.add(new Metric("feed.latency.sum"));
    metrics.add(new Metric("feed.latency.count"));
    metrics.add(new Metric("feed.latency.average"));
    metrics.add(new Metric("feed.http-requests.count"));
    metrics.add(new Metric("feed.http-requests.rate"));
    // Query rates and latency distributions.
    metrics.add(new Metric("queries.rate"));
    metrics.add(new Metric("query_container_latency.max"));
    metrics.add(new Metric("query_container_latency.sum"));
    metrics.add(new Metric("query_container_latency.count"));
    metrics.add(new Metric("query_container_latency.average"));
    metrics.add(new Metric("query_latency.max"));
    metrics.add(new Metric("query_latency.sum"));
    metrics.add(new Metric("query_latency.count"));
    metrics.add(new Metric("query_latency.average"));
    metrics.add(new Metric("query_latency.95percentile"));
    metrics.add(new Metric("query_latency.99percentile"));
    metrics.add(new Metric("failed_queries.rate"));
    metrics.add(new Metric("degraded_queries.rate"));
    metrics.add(new Metric("hits_per_query.max"));
    metrics.add(new Metric("hits_per_query.sum"));
    metrics.add(new Metric("hits_per_query.count"));
    metrics.add(new Metric("hits_per_query.average"));
    metrics.add(new Metric("hits_per_query.95percentile"));
    metrics.add(new Metric("hits_per_query.99percentile"));
    metrics.add(new Metric("query_hit_offset.max"));
    metrics.add(new Metric("query_hit_offset.sum"));
    metrics.add(new Metric("query_hit_offset.count"));
    metrics.add(new Metric("documents_covered.count"));
    metrics.add(new Metric("documents_total.count"));
    metrics.add(new Metric("dispatch_internal.rate"));
    metrics.add(new Metric("dispatch_fdispatch.rate"));
    // Fix: use List.of instead of Set.of for the suffixes. Set.of has
    // unspecified iteration order, so the insertion order into this
    // LinkedHashSet (and hence the emitted metric order) varied between
    // JVM runs. List.of is deterministic and matches every other
    // addMetric call site in this class.
    addMetric(metrics, "jdisc.render.latency", List.of("min", "max", "count", "sum", "last", "average"));
    addMetric(metrics, "query_item_count", List.of("max", "sum", "count"));
    metrics.add(new Metric("totalhits_per_query.max"));
    metrics.add(new Metric("totalhits_per_query.sum"));
    metrics.add(new Metric("totalhits_per_query.count"));
    metrics.add(new Metric("totalhits_per_query.average"));
    metrics.add(new Metric("totalhits_per_query.95percentile"));
    metrics.add(new Metric("totalhits_per_query.99percentile"));
    metrics.add(new Metric("empty_results.rate"));
    metrics.add(new Metric("requestsOverQuota.rate"));
    metrics.add(new Metric("requestsOverQuota.count"));
    // Relevance sampled at result positions 1, 3 and 10.
    metrics.add(new Metric("relevance.at_1.sum"));
    metrics.add(new Metric("relevance.at_1.count"));
    metrics.add(new Metric("relevance.at_1.average"));
    metrics.add(new Metric("relevance.at_3.sum"));
    metrics.add(new Metric("relevance.at_3.count"));
    metrics.add(new Metric("relevance.at_3.average"));
    metrics.add(new Metric("relevance.at_10.sum"));
    metrics.add(new Metric("relevance.at_10.count"));
    metrics.add(new Metric("relevance.at_10.average"));
    // Error rates, one metric per error category.
    metrics.add(new Metric("error.timeout.rate"));
    metrics.add(new Metric("error.backends_oos.rate"));
    metrics.add(new Metric("error.plugin_failure.rate"));
    metrics.add(new Metric("error.backend_communication_error.rate"));
    metrics.add(new Metric("error.empty_document_summaries.rate"));
    metrics.add(new Metric("error.invalid_query_parameter.rate"));
    metrics.add(new Metric("error.internal_server_error.rate"));
    metrics.add(new Metric("error.misconfigured_server.rate"));
    metrics.add(new Metric("error.invalid_query_transformation.rate"));
    metrics.add(new Metric("error.result_with_errors.rate"));
    metrics.add(new Metric("error.unspecified.rate"));
    metrics.add(new Metric("error.unhandled_exception.rate"));
    return metrics;
}
/**
 * Adds the standard per-executor metric set (queue size, pending, accepted,
 * wakeups, utilization) under the given metric name prefix.
 */
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
    // Ordered list preserves the original insertion order.
    for (String suffix : List.of(".queuesize.max",
                                 ".queuesize.sum",
                                 ".queuesize.count",
                                 ".maxpending.last",
                                 ".accepted.rate",
                                 ".wakeups.rate",
                                 ".utilization.max",
                                 ".utilization.sum",
                                 ".utilization.count")) {
        metrics.add(new Metric(prefix + suffix));
    }
}
// Search node (proton) metrics: document counts, query/docsum latency,
// executors, lid space, resource usage, document store, attributes,
// matching, and feeding. NOTE(review): kept byte-identical on purpose —
// insertion order into the LinkedHashSet is the emission order.
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
// Document counts and per-documentdb footprint.
metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
// Query and docsum transport latency.
metrics.add(new Metric("content.proton.transport.query.count.rate"));
metrics.add(new Metric("content.proton.docsum.docs.rate"));
metrics.add(new Metric("content.proton.docsum.latency.max"));
metrics.add(new Metric("content.proton.docsum.latency.sum"));
metrics.add(new Metric("content.proton.docsum.latency.count"));
metrics.add(new Metric("content.proton.docsum.latency.average"));
metrics.add(new Metric("content.proton.transport.query.latency.max"));
metrics.add(new Metric("content.proton.transport.query.latency.sum"));
metrics.add(new Metric("content.proton.transport.query.latency.count"));
metrics.add(new Metric("content.proton.transport.query.latency.average"));
// Search protocol message sizes and latency.
metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
// Proton-level executors (shared helper adds the standard suffix set).
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
// Background job load factors.
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
// Per-documentdb threading service executors.
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
// Local document id (lid) space health for the ready/notready/removed sub-dbs.
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
// Node-level resource usage (disk, memory, cpu, feed blocking).
metrics.add(new Metric("content.proton.resource_usage.disk.average"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.memory.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.count"));
// Transaction log.
metrics.add(new Metric("content.proton.transactionlog.entries.average"));
metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
// Document store usage per sub-db (ready / notready / removed).
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
// Document store cache effectiveness.
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
// Attribute and index memory usage.
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
// Matching metrics, overall and per rank profile.
metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
// Feeding commit pipeline.
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
return metrics;
}
/**
 * Builds the default set of metrics to collect from storage services.
 * All names follow the "vds." naming scheme and carry an aggregate suffix
 * (.max, .sum, .count, .average or .rate).
 *
 * <p>A LinkedHashSet is used, so metrics keep their insertion order and
 * duplicate additions are silently ignored.
 *
 * <p>NOTE(review): a few operation-rate metrics (e.g.
 * "vds.filestor.alldisks.allthreads.put.sum.count.rate") are added twice,
 * once in the rate block and once in the per-operation block below.
 * This is harmless in a Set but could be deduplicated.
 *
 * @return the storage metrics, in insertion order
 */
private static Set<Metric> getStorageMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();

    // Document store size.
    metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
    metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));

    // Visitor metrics: lifetime, queueing, throughput and processing across all visitor threads.
    metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max"));
    metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum"));
    metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count"));
    metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average"));
    metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max"));
    metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum"));
    metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count"));
    metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average"));
    metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max"));
    metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum"));
    metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count"));
    metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
    metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
    metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
    metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
    metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
    metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max"));
    metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum"));
    metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count"));
    metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average"));
    metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max"));
    metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum"));
    metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count"));
    metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average"));

    // File storage: operation rates across all disks and threads.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));

    // File storage: queueing, active operations and throttling.
    metrics.add(new Metric("vds.filestor.alldisks.queuesize.max"));
    metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.queuesize.count"));
    metrics.add(new Metric("vds.filestor.alldisks.queuesize.average"));
    metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max"));
    metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
    metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.count"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.max"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.count"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.max"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.count"));

    // File storage: merge and basic operation latencies.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count"));

    // Per-stripe throttling behavior.
    metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));

    // Per-operation detail: put.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count"));

    // Per-operation detail: remove.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count"));

    // Per-operation detail: get (note: no test_and_set variant for reads).
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count"));

    // Per-operation detail: update.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count"));

    // Iterator / visiting latencies.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average"));

    // Bucket maintenance operations.
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average"));
    metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
    return metrics;
}
/**
 * Adds one metric per aggregate suffix, named {@code metricName + "." + suffix}.
 *
 * @param metrics the set to add the generated metrics to
 * @param metricName the base metric name, without any aggregate suffix
 * @param aggregateSuffixes the aggregate suffixes (e.g. "max", "sum", "count") to generate metrics for
 */
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
    // Fixed parameter-name typo: "aggregateSuffices" -> "aggregateSuffixes"
    // (safe: parameter names of a private static method are not part of the call contract).
    for (String suffix : aggregateSuffixes) {
        metrics.add(new Metric(metricName + "." + suffix));
    }
}
} | |
```suggestion host("host5", resources(2), 4, "7.2", cluster)); ``` | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host3", resources(2), 2, "7.2", cluster),
host("host4", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | host("host4", resources(2), 4, "7.2", cluster)); | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host2", resources(2), 2, "7.2", cluster),
host("host3", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} |
```suggestion host("host2", resources(3), 2, "7.3", cluster)); ``` | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host3", resources(2), 2, "7.2", cluster),
host("host4", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | host("host2", resources(3), 2, "7.2", cluster)); | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host2", resources(2), 2, "7.2", cluster),
host("host3", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} |
It must be like this actually - this isn't the model version but the node's version, and these versions decide what model versions we'll build. | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host3", resources(2), 2, "7.2", cluster),
host("host4", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | host("host2", resources(3), 2, "7.2", cluster)); | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host2", resources(2), 2, "7.2", cluster),
host("host3", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} |
thanks! | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host3", resources(2), 2, "7.2", cluster),
host("host4", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | host("host4", resources(2), 4, "7.2", cluster)); | public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, ProvisionLogger logger) {
if (cluster.id().value().equals("container")) {
if (invocation == 0) {
invocation++;
return List.of(host("host0", resources(3), 0, "7.3", cluster),
host("host1", resources(3), 1, "7.3", cluster),
host("host2", resources(3), 2, "7.2", cluster));
} else if (invocation == 1) {
invocation++;
return List.of(host("host2", resources(2), 2, "7.2", cluster),
host("host3", resources(2), 3, "7.2", cluster),
host("host4", resources(2), 4, "7.2", cluster));
} else {
throw new RuntimeException("Unexpected third invocation");
}
}
else {
List<HostSpec> hosts = new ArrayList<>();
for (int i = 0; i < capacity.maxResources().nodes(); i++) {
hosts.add(host(cluster.id().value() + i,
capacity.maxResources().nodeResources(),
i,
cluster.vespaVersion().toString(),
cluster));
}
return hosts;
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} | class VersionProvisioner implements HostProvisioner {
int invocation = 0;
@Override
public HostSpec allocateHost(String alias) {
throw new RuntimeException();
}
@Override
private HostSpec host(String hostname, NodeResources resources, int index, String version, ClusterSpec cluster) {
var membership = ClusterMembership.from(cluster.with(Optional.of(ClusterSpec.Group.from(index))), index);
return new HostSpec(hostname,
resources,
resources,
resources,
membership,
Optional.of(Version.fromString(version)),
Optional.empty(),
Optional.empty());
}
} |
Not sure if you want this? :) | public void set(CompoundName key, Object value, Map<String,String> context) {
System.out.println("Setting " + key);
try {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING))
model.setQueryString(asString(value, ""));
else if (key.last().equals(Model.TYPE))
model.setType(asString(value, "ANY"));
else if (key.last().equals(Model.FILTER))
model.setFilter(asString(value, ""));
else if (key.last().equals(Model.DEFAULT_INDEX))
model.setDefaultIndex(asString(value, ""));
else if (key.last().equals(Model.LANGUAGE))
model.setLanguage(asString(value, ""));
else if (key.last().equals(Model.LOCALE))
model.setLocale(asString(value, ""));
else if (key.last().equals(Model.ENCODING))
model.setEncoding(asString(value,""));
else if (key.last().equals(Model.SEARCH_PATH))
model.setSearchPath(asString(value,""));
else if (key.last().equals(Model.SOURCES))
model.setSources(asString(value,""));
else if (key.last().equals(Model.RESTRICT))
model.setRestrict(asString(value,""));
else
throwIllegalParameter(key.last(), Model.MODEL);
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION))
ranking.setLocation(asString(value,""));
else if (key.last().equals(Ranking.PROFILE))
ranking.setProfile(asString(value,""));
else if (key.last().equals(Ranking.SORTING))
ranking.setSorting(asString(value,""));
else if (key.last().equals(Ranking.FRESHNESS))
ranking.setFreshness(asString(value, ""));
else if (key.last().equals(Ranking.QUERYCACHE))
ranking.setQueryCache(asBoolean(value, false));
else if (key.last().equals(Ranking.RERANKCOUNT))
ranking.setRerankCount(asInteger(value, null));
else if (key.last().equals(Ranking.LIST_FEATURES))
ranking.setListFeatures(asBoolean(value,false));
else
throwIllegalParameter(key.last(), Ranking.RANKING);
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE))
matchPhase.setAttribute(asString(value, null));
else if (key.last().equals(MatchPhase.ASCENDING))
matchPhase.setAscending(asBoolean(value, false));
else if (key.last().equals(MatchPhase.MAX_HITS))
matchPhase.setMaxHits(asLong(value, null));
else if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE))
matchPhase.setMaxFilterCoverage(asDouble(value, 0.2));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCH_PHASE);
}
else if (key.size() > 3 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.last().equals(Diversity.ATTRIBUTE)) {
diversity.setAttribute(asString(value, null));
}
else if (key.last().equals(Diversity.MINGROUPS)) {
diversity.setMinGroups(asLong(value, null));
}
else if ((key.size() > 4) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR))
diversity.setCutoffFactor(asDouble(value, 10.0));
else if (key.last().equals(Diversity.STRATEGY))
diversity.setCutoffStrategy(asString(value, "loose"));
else
throwIllegalParameter(key.rest().toString(), Diversity.CUTOFF);
}
else {
throwIllegalParameter(key.rest().toString(), Ranking.DIVERSITY);
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE))
soft.setEnable(asBoolean(value, true));
else if (key.last().equals(SoftTimeout.FACTOR))
soft.setFactor(asDouble(value, null));
else if (key.last().equals(SoftTimeout.TAILCOST))
soft.setTailcost(asDouble(value, null));
else
throwIllegalParameter(key.rest().toString(), Ranking.SOFTTIMEOUT);
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT))
matching.setTermwiselimit(asDouble(value, 1.0));
else if (key.last().equals(Matching.NUMTHREADSPERSEARCH))
matching.setNumThreadsPerSearch(asInteger(value, 1));
else if (key.last().equals(Matching.NUMSEARCHPARTITIIONS))
matching.setNumSearchPartitions(asInteger(value, 1));
else if (key.last().equals(Matching.MINHITSPERTHREAD))
matching.setMinHitsPerThread(asInteger(value, 0));
else if (key.last().equals(Matching.POST_FILTER_THRESHOLD))
matching.setPostFilterThreshold(asDouble(value, 1.0));
else if (key.last().equals(Matching.APPROXIMATE_THRESHOLD))
matching.setApproximateThreshold(asDouble(value, 0.05));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCHING);
}
else if (key.size() > 2) {
String restKey = key.rest().rest().toString();
chained().requireSettable(key, value, context);
if (key.get(1).equals(Ranking.FEATURES))
setRankFeature(query, restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("features"),
context));
else if (key.get(1).equals(Ranking.PROPERTIES))
ranking.getProperties().put(restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("properties"),
context));
else
throwIllegalParameter(key.rest().toString(), Ranking.RANKING);
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING))
query.getPresentation().setBolding(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY))
query.getPresentation().setSummary(asString(value, ""));
else if (key.last().equals(Presentation.FORMAT))
query.getPresentation().setFormat(asString(value, ""));
else if (key.last().equals(Presentation.TIMING))
query.getPresentation().setTiming(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY_FIELDS))
query.getPresentation().setSummaryFields(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS))
query.getPresentation().setTensorShortForm(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.FORMAT);
}
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
query.getSelect().setGroupingExpressionString(asString(value, ""));
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE))
query.getSelect().setWhereString(asString(value, ""));
else if (key.last().equals(Select.GROUPING))
query.getSelect().setGroupingString(asString(value, ""));
else
throwIllegalParameter(key.rest().toString(), Select.SELECT);
}
else {
throwIllegalParameter(key.last(), Select.SELECT);
}
}
else if (key.size() == 1) {
if (key.equals(Query.HITS))
query.setHits(asInteger(value,10));
else if (key.equals(Query.OFFSET))
query.setOffset(asInteger(value,0));
else if (key.equals(Query.TRACE_LEVEL))
query.setTraceLevel(asInteger(value,0));
else if (key.equals(Query.EXPLAIN_LEVEL))
query.setExplainLevel(asInteger(value,0));
else if (key.equals(Query.TIMEOUT))
query.setTimeout(value.toString());
else if (key.equals(Query.NO_CACHE))
query.setNoCache(asBoolean(value,false));
else if (key.equals(Query.GROUPING_SESSION_CACHE))
query.setGroupingSessionCache(asBoolean(value, true));
else
super.set(key,value,context);
}
else {
super.set(key, value, context);
}
}
catch (Exception e) {
if (e.getMessage() != null && e.getMessage().startsWith("Could not set"))
throw e;
else
throw new IllegalInputException("Could not set '" + key + "' to '" + value + "'", e);
}
} | System.out.println("Setting " + key); | public void set(CompoundName key, Object value, Map<String,String> context) {
try {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING))
model.setQueryString(asString(value, ""));
else if (key.last().equals(Model.TYPE))
model.setType(asString(value, "ANY"));
else if (key.last().equals(Model.FILTER))
model.setFilter(asString(value, ""));
else if (key.last().equals(Model.DEFAULT_INDEX))
model.setDefaultIndex(asString(value, ""));
else if (key.last().equals(Model.LANGUAGE))
model.setLanguage(asString(value, ""));
else if (key.last().equals(Model.LOCALE))
model.setLocale(asString(value, ""));
else if (key.last().equals(Model.ENCODING))
model.setEncoding(asString(value,""));
else if (key.last().equals(Model.SEARCH_PATH))
model.setSearchPath(asString(value,""));
else if (key.last().equals(Model.SOURCES))
model.setSources(asString(value,""));
else if (key.last().equals(Model.RESTRICT))
model.setRestrict(asString(value,""));
else
throwIllegalParameter(key.last(), Model.MODEL);
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION))
ranking.setLocation(asString(value,""));
else if (key.last().equals(Ranking.PROFILE))
ranking.setProfile(asString(value,""));
else if (key.last().equals(Ranking.SORTING))
ranking.setSorting(asString(value,""));
else if (key.last().equals(Ranking.FRESHNESS))
ranking.setFreshness(asString(value, ""));
else if (key.last().equals(Ranking.QUERYCACHE))
ranking.setQueryCache(asBoolean(value, false));
else if (key.last().equals(Ranking.RERANKCOUNT))
ranking.setRerankCount(asInteger(value, null));
else if (key.last().equals(Ranking.LIST_FEATURES))
ranking.setListFeatures(asBoolean(value,false));
else
throwIllegalParameter(key.last(), Ranking.RANKING);
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE))
matchPhase.setAttribute(asString(value, null));
else if (key.last().equals(MatchPhase.ASCENDING))
matchPhase.setAscending(asBoolean(value, false));
else if (key.last().equals(MatchPhase.MAX_HITS))
matchPhase.setMaxHits(asLong(value, null));
else if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE))
matchPhase.setMaxFilterCoverage(asDouble(value, 0.2));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCH_PHASE);
}
else if (key.size() > 3 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.last().equals(Diversity.ATTRIBUTE)) {
diversity.setAttribute(asString(value, null));
}
else if (key.last().equals(Diversity.MINGROUPS)) {
diversity.setMinGroups(asLong(value, null));
}
else if ((key.size() > 4) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR))
diversity.setCutoffFactor(asDouble(value, 10.0));
else if (key.last().equals(Diversity.STRATEGY))
diversity.setCutoffStrategy(asString(value, "loose"));
else
throwIllegalParameter(key.rest().toString(), Diversity.CUTOFF);
}
else {
throwIllegalParameter(key.rest().toString(), Ranking.DIVERSITY);
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE))
soft.setEnable(asBoolean(value, true));
else if (key.last().equals(SoftTimeout.FACTOR))
soft.setFactor(asDouble(value, null));
else if (key.last().equals(SoftTimeout.TAILCOST))
soft.setTailcost(asDouble(value, null));
else
throwIllegalParameter(key.rest().toString(), Ranking.SOFTTIMEOUT);
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT))
matching.setTermwiselimit(asDouble(value, 1.0));
else if (key.last().equals(Matching.NUMTHREADSPERSEARCH))
matching.setNumThreadsPerSearch(asInteger(value, 1));
else if (key.last().equals(Matching.NUMSEARCHPARTITIIONS))
matching.setNumSearchPartitions(asInteger(value, 1));
else if (key.last().equals(Matching.MINHITSPERTHREAD))
matching.setMinHitsPerThread(asInteger(value, 0));
else if (key.last().equals(Matching.POST_FILTER_THRESHOLD))
matching.setPostFilterThreshold(asDouble(value, 1.0));
else if (key.last().equals(Matching.APPROXIMATE_THRESHOLD))
matching.setApproximateThreshold(asDouble(value, 0.05));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCHING);
}
else if (key.size() > 2) {
String restKey = key.rest().rest().toString();
chained().requireSettable(key, value, context);
if (key.get(1).equals(Ranking.FEATURES))
setRankFeature(query, restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("features"),
context));
else if (key.get(1).equals(Ranking.PROPERTIES))
ranking.getProperties().put(restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("properties"),
context));
else
throwIllegalParameter(key.rest().toString(), Ranking.RANKING);
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING))
query.getPresentation().setBolding(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY))
query.getPresentation().setSummary(asString(value, ""));
else if (key.last().equals(Presentation.FORMAT))
query.getPresentation().setFormat(asString(value, ""));
else if (key.last().equals(Presentation.TIMING))
query.getPresentation().setTiming(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY_FIELDS))
query.getPresentation().setSummaryFields(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS))
query.getPresentation().setTensorShortForm(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.FORMAT);
}
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
query.getSelect().setGroupingExpressionString(asString(value, ""));
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE))
query.getSelect().setWhereString(asString(value, ""));
else if (key.last().equals(Select.GROUPING))
query.getSelect().setGroupingString(asString(value, ""));
else
throwIllegalParameter(key.rest().toString(), Select.SELECT);
}
else {
throwIllegalParameter(key.last(), Select.SELECT);
}
}
else if (key.size() == 1) {
if (key.equals(Query.HITS))
query.setHits(asInteger(value,10));
else if (key.equals(Query.OFFSET))
query.setOffset(asInteger(value,0));
else if (key.equals(Query.TRACE_LEVEL))
query.setTraceLevel(asInteger(value,0));
else if (key.equals(Query.EXPLAIN_LEVEL))
query.setExplainLevel(asInteger(value,0));
else if (key.equals(Query.TIMEOUT))
query.setTimeout(value.toString());
else if (key.equals(Query.NO_CACHE))
query.setNoCache(asBoolean(value,false));
else if (key.equals(Query.GROUPING_SESSION_CACHE))
query.setGroupingSessionCache(asBoolean(value, true));
else
super.set(key,value,context);
}
else {
super.set(key, value, context);
}
}
catch (Exception e) {
if (e.getMessage() != null && e.getMessage().startsWith("Could not set"))
throw e;
else
throw new IllegalInputException("Could not set '" + key + "' to '" + value + "'", e);
}
} | class QueryProperties extends Properties {
private Query query;
private final CompiledQueryProfileRegistry profileRegistry;
private final Map<String, Embedder> embedders;
@Deprecated
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Embedder embedder) {
this(query, profileRegistry, Map.of(Embedder.defaultEmbedderId, embedder));
}
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Map<String, Embedder> embedders) {
this.query = query;
this.profileRegistry = profileRegistry;
this.embedders = embedders;
}
public void setParentQuery(Query query) {
this.query = query;
super.setParentQuery(query);
}
@Override
public Object get(CompoundName key,
Map<String, String> context,
com.yahoo.processing.request.Properties substitution) {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING)) return model.getQueryString();
if (key.last().equals(Model.TYPE)) return model.getType();
if (key.last().equals(Model.FILTER)) return model.getFilter();
if (key.last().equals(Model.DEFAULT_INDEX)) return model.getDefaultIndex();
if (key.last().equals(Model.LANGUAGE)) return model.getLanguage();
if (key.last().equals(Model.LOCALE)) return model.getLocale();
if (key.last().equals(Model.ENCODING)) return model.getEncoding();
if (key.last().equals(Model.SOURCES)) return model.getSources();
if (key.last().equals(Model.SEARCH_PATH)) return model.getSearchPath();
if (key.last().equals(Model.RESTRICT)) return model.getRestrict();
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION)) return ranking.getLocation();
if (key.last().equals(Ranking.PROFILE)) return ranking.getProfile();
if (key.last().equals(Ranking.SORTING)) return ranking.getSorting();
if (key.last().equals(Ranking.FRESHNESS)) return ranking.getFreshness();
if (key.last().equals(Ranking.QUERYCACHE)) return ranking.getQueryCache();
if (key.last().equals(Ranking.RERANKCOUNT)) return ranking.getRerankCount();
if (key.last().equals(Ranking.LIST_FEATURES)) return ranking.getListFeatures();
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE)) return matchPhase.getAttribute();
if (key.last().equals(MatchPhase.ASCENDING)) return matchPhase.getAscending();
if (key.last().equals(MatchPhase.MAX_HITS)) return matchPhase.getMaxHits();
if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE)) return matchPhase.getMaxFilterCoverage();
} else if (key.size() >= 4 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.size() == 4) {
if (key.last().equals(Diversity.ATTRIBUTE)) return diversity.getAttribute();
if (key.last().equals(Diversity.MINGROUPS)) return diversity.getMinGroups();
} else if ((key.size() == 5) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR)) return diversity.getCutoffFactor();
if (key.last().equals(Diversity.STRATEGY)) return diversity.getCutoffStrategy();
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE)) return soft.getEnable();
if (key.last().equals(SoftTimeout.FACTOR)) return soft.getFactor();
if (key.last().equals(SoftTimeout.TAILCOST)) return soft.getTailcost();
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT)) return matching.getTermwiseLimit();
if (key.last().equals(Matching.NUMTHREADSPERSEARCH)) return matching.getNumThreadsPerSearch();
if (key.last().equals(Matching.NUMSEARCHPARTITIIONS)) return matching.getNumSearchPartitions();
if (key.last().equals(Matching.MINHITSPERTHREAD)) return matching.getMinHitsPerThread();
}
else if (key.size() > 2) {
if (key.get(1).equals(Ranking.FEATURES)) return ranking.getFeatures().getObject(key.rest().rest().toString());
if (key.get(1).equals(Ranking.PROPERTIES)) return ranking.getProperties().get(key.rest().rest().toString());
}
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
return query.getSelect().getGroupingExpressionString();
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE)) return query.getSelect().getWhereString();
if (key.last().equals(Select.GROUPING)) return query.getSelect().getGroupingString();
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING)) return query.getPresentation().getBolding();
if (key.last().equals(Presentation.SUMMARY)) return query.getPresentation().getSummary();
if (key.last().equals(Presentation.FORMAT)) return query.getPresentation().getFormat();
if (key.last().equals(Presentation.TIMING)) return query.getPresentation().getTiming();
if (key.last().equals(Presentation.SUMMARY_FIELDS)) return query.getPresentation().getSummaryFields();
} else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS)) return query.getPresentation().getTensorShortForm();
}
} else if (key.size() == 1) {
if (key.equals(Query.HITS)) return query.getHits();
if (key.equals(Query.OFFSET)) return query.getOffset();
if (key.equals(Query.TRACE_LEVEL)) return query.getTraceLevel();
if (key.equals(Query.EXPLAIN_LEVEL)) return query.getExplainLevel();
if (key.equals(Query.TIMEOUT)) return query.getTimeout();
if (key.equals(Query.NO_CACHE)) return query.getNoCache();
if (key.equals(Query.GROUPING_SESSION_CACHE)) return query.getGroupingSessionCache();
if (key.toString().equals(Model.MODEL)) return query.getModel();
if (key.toString().equals(Ranking.RANKING)) return query.getRanking();
if (key.toString().equals(Presentation.PRESENTATION)) return query.getPresentation();
}
return super.get(key, context, substitution);
}
@Override
@Override
public Map<String, Object> listProperties(CompoundName prefix,
Map<String,String> context,
com.yahoo.processing.request.Properties substitution) {
Map<String, Object> properties = super.listProperties(prefix, context, substitution);
for (CompoundName queryProperty : Query.nativeProperties) {
if (queryProperty.hasPrefix(prefix)) {
Object value = this.get(queryProperty, context, substitution);
if (value != null)
properties.put(queryProperty.toString(), value);
}
}
return properties;
}
private void setRankFeature(Query query, String key, Object value) {
if (value instanceof Tensor) {
query.getRanking().getFeatures().put(key, (Tensor) value);
}
else if (value instanceof Double) {
query.getRanking().getFeatures().put(key, (Double) value);
}
else {
String valueString = asString(value, "");
try {
query.getRanking().getFeatures().put(key, Double.parseDouble(valueString));
}
catch (IllegalArgumentException e) {
query.getRanking().getFeatures().put(key, valueString);
}
}
}
private Object toSpecifiedType(String key, Object value, QueryProfileType type, Map<String,String> context) {
if ( ! ( value instanceof String)) return value;
if (type == null) return value;
FieldDescription field = type.getField(key);
if (field == null) return value;
return field.getType().convertFrom(value, new ConversionContext(key, profileRegistry, embedders, context));
}
private void throwIllegalParameter(String key,String namespace) {
throw new IllegalInputException("'" + key + "' is not a valid property in '" + namespace +
"'. See the query api for valid keys starting by '" + namespace + "'.");
}
@Override
public final Query getParentQuery() {
return query;
}
} | class QueryProperties extends Properties {
private Query query;
private final CompiledQueryProfileRegistry profileRegistry;
private final Map<String, Embedder> embedders;
@Deprecated
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Embedder embedder) {
this(query, profileRegistry, Map.of(Embedder.defaultEmbedderId, embedder));
}
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Map<String, Embedder> embedders) {
this.query = query;
this.profileRegistry = profileRegistry;
this.embedders = embedders;
}
public void setParentQuery(Query query) {
this.query = query;
super.setParentQuery(query);
}
@Override
public Object get(CompoundName key,
Map<String, String> context,
com.yahoo.processing.request.Properties substitution) {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING)) return model.getQueryString();
if (key.last().equals(Model.TYPE)) return model.getType();
if (key.last().equals(Model.FILTER)) return model.getFilter();
if (key.last().equals(Model.DEFAULT_INDEX)) return model.getDefaultIndex();
if (key.last().equals(Model.LANGUAGE)) return model.getLanguage();
if (key.last().equals(Model.LOCALE)) return model.getLocale();
if (key.last().equals(Model.ENCODING)) return model.getEncoding();
if (key.last().equals(Model.SOURCES)) return model.getSources();
if (key.last().equals(Model.SEARCH_PATH)) return model.getSearchPath();
if (key.last().equals(Model.RESTRICT)) return model.getRestrict();
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION)) return ranking.getLocation();
if (key.last().equals(Ranking.PROFILE)) return ranking.getProfile();
if (key.last().equals(Ranking.SORTING)) return ranking.getSorting();
if (key.last().equals(Ranking.FRESHNESS)) return ranking.getFreshness();
if (key.last().equals(Ranking.QUERYCACHE)) return ranking.getQueryCache();
if (key.last().equals(Ranking.RERANKCOUNT)) return ranking.getRerankCount();
if (key.last().equals(Ranking.LIST_FEATURES)) return ranking.getListFeatures();
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE)) return matchPhase.getAttribute();
if (key.last().equals(MatchPhase.ASCENDING)) return matchPhase.getAscending();
if (key.last().equals(MatchPhase.MAX_HITS)) return matchPhase.getMaxHits();
if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE)) return matchPhase.getMaxFilterCoverage();
} else if (key.size() >= 4 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.size() == 4) {
if (key.last().equals(Diversity.ATTRIBUTE)) return diversity.getAttribute();
if (key.last().equals(Diversity.MINGROUPS)) return diversity.getMinGroups();
} else if ((key.size() == 5) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR)) return diversity.getCutoffFactor();
if (key.last().equals(Diversity.STRATEGY)) return diversity.getCutoffStrategy();
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE)) return soft.getEnable();
if (key.last().equals(SoftTimeout.FACTOR)) return soft.getFactor();
if (key.last().equals(SoftTimeout.TAILCOST)) return soft.getTailcost();
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT)) return matching.getTermwiseLimit();
if (key.last().equals(Matching.NUMTHREADSPERSEARCH)) return matching.getNumThreadsPerSearch();
if (key.last().equals(Matching.NUMSEARCHPARTITIIONS)) return matching.getNumSearchPartitions();
if (key.last().equals(Matching.MINHITSPERTHREAD)) return matching.getMinHitsPerThread();
}
else if (key.size() > 2) {
if (key.get(1).equals(Ranking.FEATURES)) return ranking.getFeatures().getObject(key.rest().rest().toString());
if (key.get(1).equals(Ranking.PROPERTIES)) return ranking.getProperties().get(key.rest().rest().toString());
}
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
return query.getSelect().getGroupingExpressionString();
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE)) return query.getSelect().getWhereString();
if (key.last().equals(Select.GROUPING)) return query.getSelect().getGroupingString();
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING)) return query.getPresentation().getBolding();
if (key.last().equals(Presentation.SUMMARY)) return query.getPresentation().getSummary();
if (key.last().equals(Presentation.FORMAT)) return query.getPresentation().getFormat();
if (key.last().equals(Presentation.TIMING)) return query.getPresentation().getTiming();
if (key.last().equals(Presentation.SUMMARY_FIELDS)) return query.getPresentation().getSummaryFields();
} else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS)) return query.getPresentation().getTensorShortForm();
}
} else if (key.size() == 1) {
if (key.equals(Query.HITS)) return query.getHits();
if (key.equals(Query.OFFSET)) return query.getOffset();
if (key.equals(Query.TRACE_LEVEL)) return query.getTraceLevel();
if (key.equals(Query.EXPLAIN_LEVEL)) return query.getExplainLevel();
if (key.equals(Query.TIMEOUT)) return query.getTimeout();
if (key.equals(Query.NO_CACHE)) return query.getNoCache();
if (key.equals(Query.GROUPING_SESSION_CACHE)) return query.getGroupingSessionCache();
if (key.toString().equals(Model.MODEL)) return query.getModel();
if (key.toString().equals(Ranking.RANKING)) return query.getRanking();
if (key.toString().equals(Presentation.PRESENTATION)) return query.getPresentation();
}
return super.get(key, context, substitution);
}
@Override
@Override
public Map<String, Object> listProperties(CompoundName prefix,
Map<String,String> context,
com.yahoo.processing.request.Properties substitution) {
Map<String, Object> properties = super.listProperties(prefix, context, substitution);
for (CompoundName queryProperty : Query.nativeProperties) {
if (queryProperty.hasPrefix(prefix)) {
Object value = this.get(queryProperty, context, substitution);
if (value != null)
properties.put(queryProperty.toString(), value);
}
}
return properties;
}
private void setRankFeature(Query query, String key, Object value) {
if (value instanceof Tensor) {
query.getRanking().getFeatures().put(key, (Tensor) value);
}
else if (value instanceof Double) {
query.getRanking().getFeatures().put(key, (Double) value);
}
else {
String valueString = asString(value, "");
try {
query.getRanking().getFeatures().put(key, Double.parseDouble(valueString));
}
catch (IllegalArgumentException e) {
query.getRanking().getFeatures().put(key, valueString);
}
}
}
private Object toSpecifiedType(String key, Object value, QueryProfileType type, Map<String,String> context) {
if ( ! ( value instanceof String)) return value;
if (type == null) return value;
FieldDescription field = type.getField(key);
if (field == null) return value;
return field.getType().convertFrom(value, new ConversionContext(key, profileRegistry, embedders, context));
}
private void throwIllegalParameter(String key,String namespace) {
throw new IllegalInputException("'" + key + "' is not a valid property in '" + namespace +
"'. See the query api for valid keys starting by '" + namespace + "'.");
}
@Override
public final Query getParentQuery() {
return query;
}
} |
Oops, tx | public void set(CompoundName key, Object value, Map<String,String> context) {
System.out.println("Setting " + key);
try {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING))
model.setQueryString(asString(value, ""));
else if (key.last().equals(Model.TYPE))
model.setType(asString(value, "ANY"));
else if (key.last().equals(Model.FILTER))
model.setFilter(asString(value, ""));
else if (key.last().equals(Model.DEFAULT_INDEX))
model.setDefaultIndex(asString(value, ""));
else if (key.last().equals(Model.LANGUAGE))
model.setLanguage(asString(value, ""));
else if (key.last().equals(Model.LOCALE))
model.setLocale(asString(value, ""));
else if (key.last().equals(Model.ENCODING))
model.setEncoding(asString(value,""));
else if (key.last().equals(Model.SEARCH_PATH))
model.setSearchPath(asString(value,""));
else if (key.last().equals(Model.SOURCES))
model.setSources(asString(value,""));
else if (key.last().equals(Model.RESTRICT))
model.setRestrict(asString(value,""));
else
throwIllegalParameter(key.last(), Model.MODEL);
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION))
ranking.setLocation(asString(value,""));
else if (key.last().equals(Ranking.PROFILE))
ranking.setProfile(asString(value,""));
else if (key.last().equals(Ranking.SORTING))
ranking.setSorting(asString(value,""));
else if (key.last().equals(Ranking.FRESHNESS))
ranking.setFreshness(asString(value, ""));
else if (key.last().equals(Ranking.QUERYCACHE))
ranking.setQueryCache(asBoolean(value, false));
else if (key.last().equals(Ranking.RERANKCOUNT))
ranking.setRerankCount(asInteger(value, null));
else if (key.last().equals(Ranking.LIST_FEATURES))
ranking.setListFeatures(asBoolean(value,false));
else
throwIllegalParameter(key.last(), Ranking.RANKING);
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE))
matchPhase.setAttribute(asString(value, null));
else if (key.last().equals(MatchPhase.ASCENDING))
matchPhase.setAscending(asBoolean(value, false));
else if (key.last().equals(MatchPhase.MAX_HITS))
matchPhase.setMaxHits(asLong(value, null));
else if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE))
matchPhase.setMaxFilterCoverage(asDouble(value, 0.2));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCH_PHASE);
}
else if (key.size() > 3 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.last().equals(Diversity.ATTRIBUTE)) {
diversity.setAttribute(asString(value, null));
}
else if (key.last().equals(Diversity.MINGROUPS)) {
diversity.setMinGroups(asLong(value, null));
}
else if ((key.size() > 4) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR))
diversity.setCutoffFactor(asDouble(value, 10.0));
else if (key.last().equals(Diversity.STRATEGY))
diversity.setCutoffStrategy(asString(value, "loose"));
else
throwIllegalParameter(key.rest().toString(), Diversity.CUTOFF);
}
else {
throwIllegalParameter(key.rest().toString(), Ranking.DIVERSITY);
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE))
soft.setEnable(asBoolean(value, true));
else if (key.last().equals(SoftTimeout.FACTOR))
soft.setFactor(asDouble(value, null));
else if (key.last().equals(SoftTimeout.TAILCOST))
soft.setTailcost(asDouble(value, null));
else
throwIllegalParameter(key.rest().toString(), Ranking.SOFTTIMEOUT);
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT))
matching.setTermwiselimit(asDouble(value, 1.0));
else if (key.last().equals(Matching.NUMTHREADSPERSEARCH))
matching.setNumThreadsPerSearch(asInteger(value, 1));
else if (key.last().equals(Matching.NUMSEARCHPARTITIIONS))
matching.setNumSearchPartitions(asInteger(value, 1));
else if (key.last().equals(Matching.MINHITSPERTHREAD))
matching.setMinHitsPerThread(asInteger(value, 0));
else if (key.last().equals(Matching.POST_FILTER_THRESHOLD))
matching.setPostFilterThreshold(asDouble(value, 1.0));
else if (key.last().equals(Matching.APPROXIMATE_THRESHOLD))
matching.setApproximateThreshold(asDouble(value, 0.05));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCHING);
}
else if (key.size() > 2) {
String restKey = key.rest().rest().toString();
chained().requireSettable(key, value, context);
if (key.get(1).equals(Ranking.FEATURES))
setRankFeature(query, restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("features"),
context));
else if (key.get(1).equals(Ranking.PROPERTIES))
ranking.getProperties().put(restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("properties"),
context));
else
throwIllegalParameter(key.rest().toString(), Ranking.RANKING);
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING))
query.getPresentation().setBolding(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY))
query.getPresentation().setSummary(asString(value, ""));
else if (key.last().equals(Presentation.FORMAT))
query.getPresentation().setFormat(asString(value, ""));
else if (key.last().equals(Presentation.TIMING))
query.getPresentation().setTiming(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY_FIELDS))
query.getPresentation().setSummaryFields(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS))
query.getPresentation().setTensorShortForm(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.FORMAT);
}
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
query.getSelect().setGroupingExpressionString(asString(value, ""));
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE))
query.getSelect().setWhereString(asString(value, ""));
else if (key.last().equals(Select.GROUPING))
query.getSelect().setGroupingString(asString(value, ""));
else
throwIllegalParameter(key.rest().toString(), Select.SELECT);
}
else {
throwIllegalParameter(key.last(), Select.SELECT);
}
}
else if (key.size() == 1) {
if (key.equals(Query.HITS))
query.setHits(asInteger(value,10));
else if (key.equals(Query.OFFSET))
query.setOffset(asInteger(value,0));
else if (key.equals(Query.TRACE_LEVEL))
query.setTraceLevel(asInteger(value,0));
else if (key.equals(Query.EXPLAIN_LEVEL))
query.setExplainLevel(asInteger(value,0));
else if (key.equals(Query.TIMEOUT))
query.setTimeout(value.toString());
else if (key.equals(Query.NO_CACHE))
query.setNoCache(asBoolean(value,false));
else if (key.equals(Query.GROUPING_SESSION_CACHE))
query.setGroupingSessionCache(asBoolean(value, true));
else
super.set(key,value,context);
}
else {
super.set(key, value, context);
}
}
catch (Exception e) {
if (e.getMessage() != null && e.getMessage().startsWith("Could not set"))
throw e;
else
throw new IllegalInputException("Could not set '" + key + "' to '" + value + "'", e);
}
} | System.out.println("Setting " + key); | public void set(CompoundName key, Object value, Map<String,String> context) {
try {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING))
model.setQueryString(asString(value, ""));
else if (key.last().equals(Model.TYPE))
model.setType(asString(value, "ANY"));
else if (key.last().equals(Model.FILTER))
model.setFilter(asString(value, ""));
else if (key.last().equals(Model.DEFAULT_INDEX))
model.setDefaultIndex(asString(value, ""));
else if (key.last().equals(Model.LANGUAGE))
model.setLanguage(asString(value, ""));
else if (key.last().equals(Model.LOCALE))
model.setLocale(asString(value, ""));
else if (key.last().equals(Model.ENCODING))
model.setEncoding(asString(value,""));
else if (key.last().equals(Model.SEARCH_PATH))
model.setSearchPath(asString(value,""));
else if (key.last().equals(Model.SOURCES))
model.setSources(asString(value,""));
else if (key.last().equals(Model.RESTRICT))
model.setRestrict(asString(value,""));
else
throwIllegalParameter(key.last(), Model.MODEL);
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION))
ranking.setLocation(asString(value,""));
else if (key.last().equals(Ranking.PROFILE))
ranking.setProfile(asString(value,""));
else if (key.last().equals(Ranking.SORTING))
ranking.setSorting(asString(value,""));
else if (key.last().equals(Ranking.FRESHNESS))
ranking.setFreshness(asString(value, ""));
else if (key.last().equals(Ranking.QUERYCACHE))
ranking.setQueryCache(asBoolean(value, false));
else if (key.last().equals(Ranking.RERANKCOUNT))
ranking.setRerankCount(asInteger(value, null));
else if (key.last().equals(Ranking.LIST_FEATURES))
ranking.setListFeatures(asBoolean(value,false));
else
throwIllegalParameter(key.last(), Ranking.RANKING);
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE))
matchPhase.setAttribute(asString(value, null));
else if (key.last().equals(MatchPhase.ASCENDING))
matchPhase.setAscending(asBoolean(value, false));
else if (key.last().equals(MatchPhase.MAX_HITS))
matchPhase.setMaxHits(asLong(value, null));
else if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE))
matchPhase.setMaxFilterCoverage(asDouble(value, 0.2));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCH_PHASE);
}
else if (key.size() > 3 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.last().equals(Diversity.ATTRIBUTE)) {
diversity.setAttribute(asString(value, null));
}
else if (key.last().equals(Diversity.MINGROUPS)) {
diversity.setMinGroups(asLong(value, null));
}
else if ((key.size() > 4) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR))
diversity.setCutoffFactor(asDouble(value, 10.0));
else if (key.last().equals(Diversity.STRATEGY))
diversity.setCutoffStrategy(asString(value, "loose"));
else
throwIllegalParameter(key.rest().toString(), Diversity.CUTOFF);
}
else {
throwIllegalParameter(key.rest().toString(), Ranking.DIVERSITY);
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE))
soft.setEnable(asBoolean(value, true));
else if (key.last().equals(SoftTimeout.FACTOR))
soft.setFactor(asDouble(value, null));
else if (key.last().equals(SoftTimeout.TAILCOST))
soft.setTailcost(asDouble(value, null));
else
throwIllegalParameter(key.rest().toString(), Ranking.SOFTTIMEOUT);
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT))
matching.setTermwiselimit(asDouble(value, 1.0));
else if (key.last().equals(Matching.NUMTHREADSPERSEARCH))
matching.setNumThreadsPerSearch(asInteger(value, 1));
else if (key.last().equals(Matching.NUMSEARCHPARTITIIONS))
matching.setNumSearchPartitions(asInteger(value, 1));
else if (key.last().equals(Matching.MINHITSPERTHREAD))
matching.setMinHitsPerThread(asInteger(value, 0));
else if (key.last().equals(Matching.POST_FILTER_THRESHOLD))
matching.setPostFilterThreshold(asDouble(value, 1.0));
else if (key.last().equals(Matching.APPROXIMATE_THRESHOLD))
matching.setApproximateThreshold(asDouble(value, 0.05));
else
throwIllegalParameter(key.rest().toString(), Ranking.MATCHING);
}
else if (key.size() > 2) {
String restKey = key.rest().rest().toString();
chained().requireSettable(key, value, context);
if (key.get(1).equals(Ranking.FEATURES))
setRankFeature(query, restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("features"),
context));
else if (key.get(1).equals(Ranking.PROPERTIES))
ranking.getProperties().put(restKey, toSpecifiedType(restKey,
value,
profileRegistry.getTypeRegistry().getComponent("properties"),
context));
else
throwIllegalParameter(key.rest().toString(), Ranking.RANKING);
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING))
query.getPresentation().setBolding(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY))
query.getPresentation().setSummary(asString(value, ""));
else if (key.last().equals(Presentation.FORMAT))
query.getPresentation().setFormat(asString(value, ""));
else if (key.last().equals(Presentation.TIMING))
query.getPresentation().setTiming(asBoolean(value, true));
else if (key.last().equals(Presentation.SUMMARY_FIELDS))
query.getPresentation().setSummaryFields(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS))
query.getPresentation().setTensorShortForm(asString(value, ""));
else
throwIllegalParameter(key.last(), Presentation.FORMAT);
}
else
throwIllegalParameter(key.last(), Presentation.PRESENTATION);
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
query.getSelect().setGroupingExpressionString(asString(value, ""));
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE))
query.getSelect().setWhereString(asString(value, ""));
else if (key.last().equals(Select.GROUPING))
query.getSelect().setGroupingString(asString(value, ""));
else
throwIllegalParameter(key.rest().toString(), Select.SELECT);
}
else {
throwIllegalParameter(key.last(), Select.SELECT);
}
}
else if (key.size() == 1) {
if (key.equals(Query.HITS))
query.setHits(asInteger(value,10));
else if (key.equals(Query.OFFSET))
query.setOffset(asInteger(value,0));
else if (key.equals(Query.TRACE_LEVEL))
query.setTraceLevel(asInteger(value,0));
else if (key.equals(Query.EXPLAIN_LEVEL))
query.setExplainLevel(asInteger(value,0));
else if (key.equals(Query.TIMEOUT))
query.setTimeout(value.toString());
else if (key.equals(Query.NO_CACHE))
query.setNoCache(asBoolean(value,false));
else if (key.equals(Query.GROUPING_SESSION_CACHE))
query.setGroupingSessionCache(asBoolean(value, true));
else
super.set(key,value,context);
}
else {
super.set(key, value, context);
}
}
catch (Exception e) {
if (e.getMessage() != null && e.getMessage().startsWith("Could not set"))
throw e;
else
throw new IllegalInputException("Could not set '" + key + "' to '" + value + "'", e);
}
} | class QueryProperties extends Properties {
private Query query;
private final CompiledQueryProfileRegistry profileRegistry;
private final Map<String, Embedder> embedders;
@Deprecated
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Embedder embedder) {
this(query, profileRegistry, Map.of(Embedder.defaultEmbedderId, embedder));
}
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Map<String, Embedder> embedders) {
this.query = query;
this.profileRegistry = profileRegistry;
this.embedders = embedders;
}
public void setParentQuery(Query query) {
this.query = query;
super.setParentQuery(query);
}
@Override
public Object get(CompoundName key,
Map<String, String> context,
com.yahoo.processing.request.Properties substitution) {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING)) return model.getQueryString();
if (key.last().equals(Model.TYPE)) return model.getType();
if (key.last().equals(Model.FILTER)) return model.getFilter();
if (key.last().equals(Model.DEFAULT_INDEX)) return model.getDefaultIndex();
if (key.last().equals(Model.LANGUAGE)) return model.getLanguage();
if (key.last().equals(Model.LOCALE)) return model.getLocale();
if (key.last().equals(Model.ENCODING)) return model.getEncoding();
if (key.last().equals(Model.SOURCES)) return model.getSources();
if (key.last().equals(Model.SEARCH_PATH)) return model.getSearchPath();
if (key.last().equals(Model.RESTRICT)) return model.getRestrict();
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION)) return ranking.getLocation();
if (key.last().equals(Ranking.PROFILE)) return ranking.getProfile();
if (key.last().equals(Ranking.SORTING)) return ranking.getSorting();
if (key.last().equals(Ranking.FRESHNESS)) return ranking.getFreshness();
if (key.last().equals(Ranking.QUERYCACHE)) return ranking.getQueryCache();
if (key.last().equals(Ranking.RERANKCOUNT)) return ranking.getRerankCount();
if (key.last().equals(Ranking.LIST_FEATURES)) return ranking.getListFeatures();
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE)) return matchPhase.getAttribute();
if (key.last().equals(MatchPhase.ASCENDING)) return matchPhase.getAscending();
if (key.last().equals(MatchPhase.MAX_HITS)) return matchPhase.getMaxHits();
if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE)) return matchPhase.getMaxFilterCoverage();
} else if (key.size() >= 4 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.size() == 4) {
if (key.last().equals(Diversity.ATTRIBUTE)) return diversity.getAttribute();
if (key.last().equals(Diversity.MINGROUPS)) return diversity.getMinGroups();
} else if ((key.size() == 5) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR)) return diversity.getCutoffFactor();
if (key.last().equals(Diversity.STRATEGY)) return diversity.getCutoffStrategy();
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE)) return soft.getEnable();
if (key.last().equals(SoftTimeout.FACTOR)) return soft.getFactor();
if (key.last().equals(SoftTimeout.TAILCOST)) return soft.getTailcost();
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT)) return matching.getTermwiseLimit();
if (key.last().equals(Matching.NUMTHREADSPERSEARCH)) return matching.getNumThreadsPerSearch();
if (key.last().equals(Matching.NUMSEARCHPARTITIIONS)) return matching.getNumSearchPartitions();
if (key.last().equals(Matching.MINHITSPERTHREAD)) return matching.getMinHitsPerThread();
}
else if (key.size() > 2) {
if (key.get(1).equals(Ranking.FEATURES)) return ranking.getFeatures().getObject(key.rest().rest().toString());
if (key.get(1).equals(Ranking.PROPERTIES)) return ranking.getProperties().get(key.rest().rest().toString());
}
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
return query.getSelect().getGroupingExpressionString();
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE)) return query.getSelect().getWhereString();
if (key.last().equals(Select.GROUPING)) return query.getSelect().getGroupingString();
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING)) return query.getPresentation().getBolding();
if (key.last().equals(Presentation.SUMMARY)) return query.getPresentation().getSummary();
if (key.last().equals(Presentation.FORMAT)) return query.getPresentation().getFormat();
if (key.last().equals(Presentation.TIMING)) return query.getPresentation().getTiming();
if (key.last().equals(Presentation.SUMMARY_FIELDS)) return query.getPresentation().getSummaryFields();
} else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS)) return query.getPresentation().getTensorShortForm();
}
} else if (key.size() == 1) {
if (key.equals(Query.HITS)) return query.getHits();
if (key.equals(Query.OFFSET)) return query.getOffset();
if (key.equals(Query.TRACE_LEVEL)) return query.getTraceLevel();
if (key.equals(Query.EXPLAIN_LEVEL)) return query.getExplainLevel();
if (key.equals(Query.TIMEOUT)) return query.getTimeout();
if (key.equals(Query.NO_CACHE)) return query.getNoCache();
if (key.equals(Query.GROUPING_SESSION_CACHE)) return query.getGroupingSessionCache();
if (key.toString().equals(Model.MODEL)) return query.getModel();
if (key.toString().equals(Ranking.RANKING)) return query.getRanking();
if (key.toString().equals(Presentation.PRESENTATION)) return query.getPresentation();
}
return super.get(key, context, substitution);
}
@Override
@Override
public Map<String, Object> listProperties(CompoundName prefix,
Map<String,String> context,
com.yahoo.processing.request.Properties substitution) {
Map<String, Object> properties = super.listProperties(prefix, context, substitution);
for (CompoundName queryProperty : Query.nativeProperties) {
if (queryProperty.hasPrefix(prefix)) {
Object value = this.get(queryProperty, context, substitution);
if (value != null)
properties.put(queryProperty.toString(), value);
}
}
return properties;
}
private void setRankFeature(Query query, String key, Object value) {
if (value instanceof Tensor) {
query.getRanking().getFeatures().put(key, (Tensor) value);
}
else if (value instanceof Double) {
query.getRanking().getFeatures().put(key, (Double) value);
}
else {
String valueString = asString(value, "");
try {
query.getRanking().getFeatures().put(key, Double.parseDouble(valueString));
}
catch (IllegalArgumentException e) {
query.getRanking().getFeatures().put(key, valueString);
}
}
}
private Object toSpecifiedType(String key, Object value, QueryProfileType type, Map<String,String> context) {
if ( ! ( value instanceof String)) return value;
if (type == null) return value;
FieldDescription field = type.getField(key);
if (field == null) return value;
return field.getType().convertFrom(value, new ConversionContext(key, profileRegistry, embedders, context));
}
private void throwIllegalParameter(String key,String namespace) {
throw new IllegalInputException("'" + key + "' is not a valid property in '" + namespace +
"'. See the query api for valid keys starting by '" + namespace + "'.");
}
@Override
public final Query getParentQuery() {
return query;
}
} | class QueryProperties extends Properties {
private Query query;
private final CompiledQueryProfileRegistry profileRegistry;
private final Map<String, Embedder> embedders;
@Deprecated
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Embedder embedder) {
this(query, profileRegistry, Map.of(Embedder.defaultEmbedderId, embedder));
}
public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Map<String, Embedder> embedders) {
this.query = query;
this.profileRegistry = profileRegistry;
this.embedders = embedders;
}
public void setParentQuery(Query query) {
this.query = query;
super.setParentQuery(query);
}
@Override
public Object get(CompoundName key,
Map<String, String> context,
com.yahoo.processing.request.Properties substitution) {
if (key.size() == 2 && key.first().equals(Model.MODEL)) {
Model model = query.getModel();
if (key.last().equals(Model.QUERY_STRING)) return model.getQueryString();
if (key.last().equals(Model.TYPE)) return model.getType();
if (key.last().equals(Model.FILTER)) return model.getFilter();
if (key.last().equals(Model.DEFAULT_INDEX)) return model.getDefaultIndex();
if (key.last().equals(Model.LANGUAGE)) return model.getLanguage();
if (key.last().equals(Model.LOCALE)) return model.getLocale();
if (key.last().equals(Model.ENCODING)) return model.getEncoding();
if (key.last().equals(Model.SOURCES)) return model.getSources();
if (key.last().equals(Model.SEARCH_PATH)) return model.getSearchPath();
if (key.last().equals(Model.RESTRICT)) return model.getRestrict();
}
else if (key.first().equals(Ranking.RANKING)) {
Ranking ranking = query.getRanking();
if (key.size() == 2) {
if (key.last().equals(Ranking.LOCATION)) return ranking.getLocation();
if (key.last().equals(Ranking.PROFILE)) return ranking.getProfile();
if (key.last().equals(Ranking.SORTING)) return ranking.getSorting();
if (key.last().equals(Ranking.FRESHNESS)) return ranking.getFreshness();
if (key.last().equals(Ranking.QUERYCACHE)) return ranking.getQueryCache();
if (key.last().equals(Ranking.RERANKCOUNT)) return ranking.getRerankCount();
if (key.last().equals(Ranking.LIST_FEATURES)) return ranking.getListFeatures();
}
else if (key.size() >= 3 && key.get(1).equals(Ranking.MATCH_PHASE)) {
if (key.size() == 3) {
MatchPhase matchPhase = ranking.getMatchPhase();
if (key.last().equals(MatchPhase.ATTRIBUTE)) return matchPhase.getAttribute();
if (key.last().equals(MatchPhase.ASCENDING)) return matchPhase.getAscending();
if (key.last().equals(MatchPhase.MAX_HITS)) return matchPhase.getMaxHits();
if (key.last().equals(MatchPhase.MAX_FILTER_COVERAGE)) return matchPhase.getMaxFilterCoverage();
} else if (key.size() >= 4 && key.get(2).equals(Ranking.DIVERSITY)) {
Diversity diversity = ranking.getMatchPhase().getDiversity();
if (key.size() == 4) {
if (key.last().equals(Diversity.ATTRIBUTE)) return diversity.getAttribute();
if (key.last().equals(Diversity.MINGROUPS)) return diversity.getMinGroups();
} else if ((key.size() == 5) && key.get(3).equals(Diversity.CUTOFF)) {
if (key.last().equals(Diversity.FACTOR)) return diversity.getCutoffFactor();
if (key.last().equals(Diversity.STRATEGY)) return diversity.getCutoffStrategy();
}
}
}
else if (key.size() == 3 && key.get(1).equals(Ranking.SOFTTIMEOUT)) {
SoftTimeout soft = ranking.getSoftTimeout();
if (key.last().equals(SoftTimeout.ENABLE)) return soft.getEnable();
if (key.last().equals(SoftTimeout.FACTOR)) return soft.getFactor();
if (key.last().equals(SoftTimeout.TAILCOST)) return soft.getTailcost();
}
else if (key.size() == 3 && key.get(1).equals(Ranking.MATCHING)) {
Matching matching = ranking.getMatching();
if (key.last().equals(Matching.TERMWISELIMIT)) return matching.getTermwiseLimit();
if (key.last().equals(Matching.NUMTHREADSPERSEARCH)) return matching.getNumThreadsPerSearch();
if (key.last().equals(Matching.NUMSEARCHPARTITIIONS)) return matching.getNumSearchPartitions();
if (key.last().equals(Matching.MINHITSPERTHREAD)) return matching.getMinHitsPerThread();
}
else if (key.size() > 2) {
if (key.get(1).equals(Ranking.FEATURES)) return ranking.getFeatures().getObject(key.rest().rest().toString());
if (key.get(1).equals(Ranking.PROPERTIES)) return ranking.getProperties().get(key.rest().rest().toString());
}
}
else if (key.first().equals(Select.SELECT)) {
if (key.size() == 1) {
return query.getSelect().getGroupingExpressionString();
}
else if (key.size() == 2) {
if (key.last().equals(Select.WHERE)) return query.getSelect().getWhereString();
if (key.last().equals(Select.GROUPING)) return query.getSelect().getGroupingString();
}
}
else if (key.first().equals(Presentation.PRESENTATION)) {
if (key.size() == 2) {
if (key.last().equals(Presentation.BOLDING)) return query.getPresentation().getBolding();
if (key.last().equals(Presentation.SUMMARY)) return query.getPresentation().getSummary();
if (key.last().equals(Presentation.FORMAT)) return query.getPresentation().getFormat();
if (key.last().equals(Presentation.TIMING)) return query.getPresentation().getTiming();
if (key.last().equals(Presentation.SUMMARY_FIELDS)) return query.getPresentation().getSummaryFields();
} else if (key.size() == 3 && key.get(1).equals(Presentation.FORMAT)) {
if (key.last().equals(Presentation.TENSORS)) return query.getPresentation().getTensorShortForm();
}
} else if (key.size() == 1) {
if (key.equals(Query.HITS)) return query.getHits();
if (key.equals(Query.OFFSET)) return query.getOffset();
if (key.equals(Query.TRACE_LEVEL)) return query.getTraceLevel();
if (key.equals(Query.EXPLAIN_LEVEL)) return query.getExplainLevel();
if (key.equals(Query.TIMEOUT)) return query.getTimeout();
if (key.equals(Query.NO_CACHE)) return query.getNoCache();
if (key.equals(Query.GROUPING_SESSION_CACHE)) return query.getGroupingSessionCache();
if (key.toString().equals(Model.MODEL)) return query.getModel();
if (key.toString().equals(Ranking.RANKING)) return query.getRanking();
if (key.toString().equals(Presentation.PRESENTATION)) return query.getPresentation();
}
return super.get(key, context, substitution);
}
@Override
@Override
public Map<String, Object> listProperties(CompoundName prefix,
Map<String,String> context,
com.yahoo.processing.request.Properties substitution) {
Map<String, Object> properties = super.listProperties(prefix, context, substitution);
for (CompoundName queryProperty : Query.nativeProperties) {
if (queryProperty.hasPrefix(prefix)) {
Object value = this.get(queryProperty, context, substitution);
if (value != null)
properties.put(queryProperty.toString(), value);
}
}
return properties;
}
private void setRankFeature(Query query, String key, Object value) {
if (value instanceof Tensor) {
query.getRanking().getFeatures().put(key, (Tensor) value);
}
else if (value instanceof Double) {
query.getRanking().getFeatures().put(key, (Double) value);
}
else {
String valueString = asString(value, "");
try {
query.getRanking().getFeatures().put(key, Double.parseDouble(valueString));
}
catch (IllegalArgumentException e) {
query.getRanking().getFeatures().put(key, valueString);
}
}
}
private Object toSpecifiedType(String key, Object value, QueryProfileType type, Map<String,String> context) {
if ( ! ( value instanceof String)) return value;
if (type == null) return value;
FieldDescription field = type.getField(key);
if (field == null) return value;
return field.getType().convertFrom(value, new ConversionContext(key, profileRegistry, embedders, context));
}
private void throwIllegalParameter(String key,String namespace) {
throw new IllegalInputException("'" + key + "' is not a valid property in '" + namespace +
"'. See the query api for valid keys starting by '" + namespace + "'.");
}
@Override
public final Query getParentQuery() {
return query;
}
} |
Tags should match the version number. | public void maintain() {
DeploymentTester tester = new DeploymentTester();
ArtifactExpirer expirer = new ArtifactExpirer(tester.controller(), Duration.ofDays(1));
ArtifactRegistryMock registry = tester.controllerTester().serviceRegistry().artifactRegistry(CloudName.defaultName()).orElseThrow();
Instant instant = tester.clock().instant();
Artifact image0 = new Artifact("image0", "registry.example.com", "vespa/vespa", "7.4", instant, Version.fromString("7.1"));
Artifact image1 = new Artifact("image1", "registry.example.com", "vespa/vespa", "7.4-amd64", instant, Version.fromString("7.2"));
Artifact image2 = new Artifact("image2", "registry.example.com", "vespa/vespa", "7.4-amd64", instant, Version.fromString("7.4"));
registry.add(image0)
.add(image1)
.add(image2);
tester.controllerTester().upgradeSystem(image1.version());
expirer.maintain();
assertEquals(List.of(image0, image1, image2), registry.list());
tester.clock().advance(Duration.ofDays(1));
expirer.maintain();
assertEquals(List.of(image0, image1, image2), registry.list());
tester.clock().advance(Duration.ofDays(13).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertEquals(List.of(image1, image2), registry.list());
Artifact image3 = new Artifact("image3", "registry.example.com", "vespa/vespa", "7.3-arm64", tester.clock().instant(), Version.fromString("7.3"));
registry.add(image3);
tester.controllerTester().upgradeSystem(image3.version());
expirer.maintain();
assertEquals(List.of(image3, image2), registry.list());
} | Artifact image1 = new Artifact("image1", "registry.example.com", "vespa/vespa", "7.4-amd64", instant, Version.fromString("7.2")); | public void maintain() {
DeploymentTester tester = new DeploymentTester();
ArtifactExpirer expirer = new ArtifactExpirer(tester.controller(), Duration.ofDays(1));
ArtifactRegistryMock registry = tester.controllerTester().serviceRegistry().artifactRegistry(CloudName.defaultName()).orElseThrow();
Instant instant = tester.clock().instant();
Artifact image0 = new Artifact("image0", "registry.example.com", "vespa/vespa", "7.1", instant, Version.fromString("7.1"));
Artifact image1 = new Artifact("image1", "registry.example.com", "vespa/vespa", "7.2-amd64", instant, Version.fromString("7.2"));
Artifact image2 = new Artifact("image2", "registry.example.com", "vespa/vespa", "7.4-amd64", instant, Version.fromString("7.4"));
registry.add(image0)
.add(image1)
.add(image2);
tester.controllerTester().upgradeSystem(image1.version());
expirer.maintain();
assertEquals(List.of(image0, image1, image2), registry.list());
tester.clock().advance(Duration.ofDays(1));
expirer.maintain();
assertEquals(List.of(image0, image1, image2), registry.list());
tester.clock().advance(Duration.ofDays(13).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertEquals(List.of(image1, image2), registry.list());
Artifact image3 = new Artifact("image3", "registry.example.com", "vespa/vespa", "7.3-arm64", tester.clock().instant(), Version.fromString("7.3"));
registry.add(image3);
tester.controllerTester().upgradeSystem(image3.version());
expirer.maintain();
assertEquals(List.of(image3, image2), registry.list());
} | class ArtifactExpirerTest {
@Test
} | class ArtifactExpirerTest {
@Test
} |
This is not really needed unless the client want to parse the error message. A 500 response is returned whenever a request handler throws an exception. It will result in a HTML response though. | private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage()))
.build();
} | .addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage())) | private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage()))
.build();
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final Clock clock;
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.clock = controller.serviceRegistry().clock();
}
private Slime tenant(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var plan = billing.getPlan(tenant.name());
var collectionMethod = billing.getCollectionMethod(tenant.name());
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", plan.value());
cursor.setString("collection", collectionMethod.name());
return response;
}
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", billing.getPlan(tenant.name()).value());
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
var format = requestContext.queryParameters().getString("format").orElse("json");
var invoice = billing.getBillsForTenant(tenant.name()).stream()
.filter(inv -> inv.id().value().equals(invoiceId))
.findAny()
.orElseThrow(RestApiException.NotFound::new);
if (format.equals("json")) {
var slime = new Slime();
toSlime(slime.setObject(), invoice);
return new SlimeJsonResponse(slime);
}
if (format.equals("csv")) {
var csv = toCsv(invoice);
return new CsvResponse(CSV_INVOICE_HEADER, csv);
}
throw new RestApiException.BadRequest("Unknown format: " + format);
}
private boolean tenantHasDeployments(TenantName tenant) {
return applications.asList(tenant).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().size())
.sum() > 0;
}
private Slime tenantUsage(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
usageToSlime(slime.setObject(), usage);
return slime;
}
private Slime accountant(RestApi.RequestContext requestContext) {
var untilAt = untilParameter(requestContext);
var usagePerTenant = billing.createUncommittedBills(untilAt);
var response = new Slime();
var tenantsResponse = response.setObject().setArray("tenants");
tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
var tenantResponse = tenantsResponse.addObject();
tenantResponse.setString("tenant", tenant.name().value());
tenantResponse.setString("plan", billing.getPlan(tenant.name()).value());
tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
});
return response;
}
private Slime previewBill(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
toSlime(slime.setObject(), usage);
return slime;
}
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
var body = slime.get();
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var startAt = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
var endAt = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
var invoiceId = billing.createBillForPeriod(tenant.name(), startAt, endAt, security.principal().getName());
return new MessageResponse("Created bill " + invoiceId.value());
}
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
bills.forEach(invoice -> invoiceSummaryToSlime(slime.addObject(), invoice));
}
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
}
private void usageToSlime(Cursor slime, Bill bill) {
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
toSlime(slime.setArray("statusHistory"), bill.statusHistory());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill.StatusHistory history) {
history.getHistory().forEach((key, value) -> {
var c = slime.addObject();
c.setString("at", key.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
c.setString("status", value);
});
}
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
items.forEach(item -> toSlime(slime.addObject(), item));
}
private void toSlime(Cursor slime, Bill.LineItem item) {
slime.setString("id", item.id());
slime.setString("description", item.description());
slime.setString("amount",item.amount().toString());
slime.setString("plan", item.plan());
slime.setString("planName", billing.getPlanDisplayName(PlanId.from(item.plan())));
item.applicationId().ifPresent(appId -> {
slime.setString("application", appId.application().value());
slime.setString("instance", appId.instance().value());
});
item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
hours.ifPresent(h -> slime.setString("hours", h.toString()));
cost.ifPresent(c -> slime.setString("cost", c.toString()));
}
private List<Object[]> toCsv(Bill bill) {
return List.<Object[]>of(new Object[]{
bill.id().value(), bill.tenant().value(),
bill.getStartDate().format(DateTimeFormatter.ISO_DATE),
bill.getEndDate().format(DateTimeFormatter.ISO_DATE),
bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
bill.sumAdditionalCost()
});
}
private LocalDate untilParameter(RestApi.RequestContext ctx) {
return ctx.queryParameters().getString("until")
.map(LocalDate::parse)
.map(date -> date.plusDays(1))
.orElseGet(this::tomorrow);
}
private LocalDate tomorrow() {
return LocalDate.now(clock).plusDays(1);
}
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
if (!inspector.field(field).valid())
throw new RestApiException.BadRequest("Field " + field + " cannot be null");
return inspector.field(field).asString();
}
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final Clock clock;
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.clock = controller.serviceRegistry().clock();
}
private Slime tenant(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var plan = billing.getPlan(tenant.name());
var collectionMethod = billing.getCollectionMethod(tenant.name());
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", plan.value());
cursor.setString("collection", collectionMethod.name());
return response;
}
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", billing.getPlan(tenant.name()).value());
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
var format = requestContext.queryParameters().getString("format").orElse("json");
var invoice = billing.getBillsForTenant(tenant.name()).stream()
.filter(inv -> inv.id().value().equals(invoiceId))
.findAny()
.orElseThrow(RestApiException.NotFound::new);
if (format.equals("json")) {
var slime = new Slime();
toSlime(slime.setObject(), invoice);
return new SlimeJsonResponse(slime);
}
if (format.equals("csv")) {
var csv = toCsv(invoice);
return new CsvResponse(CSV_INVOICE_HEADER, csv);
}
throw new RestApiException.BadRequest("Unknown format: " + format);
}
private boolean tenantHasDeployments(TenantName tenant) {
return applications.asList(tenant).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().size())
.sum() > 0;
}
private Slime tenantUsage(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
usageToSlime(slime.setObject(), usage);
return slime;
}
private Slime accountant(RestApi.RequestContext requestContext) {
var untilAt = untilParameter(requestContext);
var usagePerTenant = billing.createUncommittedBills(untilAt);
var response = new Slime();
var tenantsResponse = response.setObject().setArray("tenants");
tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
var tenantResponse = tenantsResponse.addObject();
tenantResponse.setString("tenant", tenant.name().value());
tenantResponse.setString("plan", billing.getPlan(tenant.name()).value());
tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
});
return response;
}
private Slime previewBill(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
toSlime(slime.setObject(), usage);
return slime;
}
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
var body = slime.get();
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var startAt = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
var endAt = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
var invoiceId = billing.createBillForPeriod(tenant.name(), startAt, endAt, security.principal().getName());
return new MessageResponse("Created bill " + invoiceId.value());
}
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
bills.forEach(invoice -> invoiceSummaryToSlime(slime.addObject(), invoice));
}
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
}
private void usageToSlime(Cursor slime, Bill bill) {
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
toSlime(slime.setArray("statusHistory"), bill.statusHistory());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill.StatusHistory history) {
history.getHistory().forEach((key, value) -> {
var c = slime.addObject();
c.setString("at", key.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
c.setString("status", value);
});
}
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
items.forEach(item -> toSlime(slime.addObject(), item));
}
private void toSlime(Cursor slime, Bill.LineItem item) {
slime.setString("id", item.id());
slime.setString("description", item.description());
slime.setString("amount",item.amount().toString());
slime.setString("plan", item.plan());
slime.setString("planName", billing.getPlanDisplayName(PlanId.from(item.plan())));
item.applicationId().ifPresent(appId -> {
slime.setString("application", appId.application().value());
slime.setString("instance", appId.instance().value());
});
item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
hours.ifPresent(h -> slime.setString("hours", h.toString()));
cost.ifPresent(c -> slime.setString("cost", c.toString()));
}
private List<Object[]> toCsv(Bill bill) {
return List.<Object[]>of(new Object[]{
bill.id().value(), bill.tenant().value(),
bill.getStartDate().format(DateTimeFormatter.ISO_DATE),
bill.getEndDate().format(DateTimeFormatter.ISO_DATE),
bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
bill.sumAdditionalCost()
});
}
private LocalDate untilParameter(RestApi.RequestContext ctx) {
return ctx.queryParameters().getString("until")
.map(LocalDate::parse)
.map(date -> date.plusDays(1))
.orElseGet(this::tomorrow);
}
private LocalDate tomorrow() {
return LocalDate.now(clock).plusDays(1);
}
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
if (!inspector.field(field).valid())
throw new RestApiException.BadRequest("Field " + field + " cannot be null");
return inspector.field(field).asString();
}
} |
Yes. Let's keep it JSON :) | private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage()))
.build();
} | .addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage())) | private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addExceptionMapper(RuntimeException.class, (__, e) -> ErrorResponse.internalServerError(e.getMessage()))
.build();
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final Clock clock;
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.clock = controller.serviceRegistry().clock();
}
private Slime tenant(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var plan = billing.getPlan(tenant.name());
var collectionMethod = billing.getCollectionMethod(tenant.name());
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", plan.value());
cursor.setString("collection", collectionMethod.name());
return response;
}
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", billing.getPlan(tenant.name()).value());
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
var format = requestContext.queryParameters().getString("format").orElse("json");
var invoice = billing.getBillsForTenant(tenant.name()).stream()
.filter(inv -> inv.id().value().equals(invoiceId))
.findAny()
.orElseThrow(RestApiException.NotFound::new);
if (format.equals("json")) {
var slime = new Slime();
toSlime(slime.setObject(), invoice);
return new SlimeJsonResponse(slime);
}
if (format.equals("csv")) {
var csv = toCsv(invoice);
return new CsvResponse(CSV_INVOICE_HEADER, csv);
}
throw new RestApiException.BadRequest("Unknown format: " + format);
}
private boolean tenantHasDeployments(TenantName tenant) {
return applications.asList(tenant).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().size())
.sum() > 0;
}
private Slime tenantUsage(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
usageToSlime(slime.setObject(), usage);
return slime;
}
private Slime accountant(RestApi.RequestContext requestContext) {
var untilAt = untilParameter(requestContext);
var usagePerTenant = billing.createUncommittedBills(untilAt);
var response = new Slime();
var tenantsResponse = response.setObject().setArray("tenants");
tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
var tenantResponse = tenantsResponse.addObject();
tenantResponse.setString("tenant", tenant.name().value());
tenantResponse.setString("plan", billing.getPlan(tenant.name()).value());
tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
});
return response;
}
private Slime previewBill(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
toSlime(slime.setObject(), usage);
return slime;
}
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
var body = slime.get();
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var startAt = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
var endAt = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
var invoiceId = billing.createBillForPeriod(tenant.name(), startAt, endAt, security.principal().getName());
return new MessageResponse("Created bill " + invoiceId.value());
}
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
bills.forEach(invoice -> invoiceSummaryToSlime(slime.addObject(), invoice));
}
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
}
private void usageToSlime(Cursor slime, Bill bill) {
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
toSlime(slime.setArray("statusHistory"), bill.statusHistory());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill.StatusHistory history) {
history.getHistory().forEach((key, value) -> {
var c = slime.addObject();
c.setString("at", key.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
c.setString("status", value);
});
}
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
items.forEach(item -> toSlime(slime.addObject(), item));
}
private void toSlime(Cursor slime, Bill.LineItem item) {
slime.setString("id", item.id());
slime.setString("description", item.description());
slime.setString("amount",item.amount().toString());
slime.setString("plan", item.plan());
slime.setString("planName", billing.getPlanDisplayName(PlanId.from(item.plan())));
item.applicationId().ifPresent(appId -> {
slime.setString("application", appId.application().value());
slime.setString("instance", appId.instance().value());
});
item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
hours.ifPresent(h -> slime.setString("hours", h.toString()));
cost.ifPresent(c -> slime.setString("cost", c.toString()));
}
private List<Object[]> toCsv(Bill bill) {
return List.<Object[]>of(new Object[]{
bill.id().value(), bill.tenant().value(),
bill.getStartDate().format(DateTimeFormatter.ISO_DATE),
bill.getEndDate().format(DateTimeFormatter.ISO_DATE),
bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
bill.sumAdditionalCost()
});
}
private LocalDate untilParameter(RestApi.RequestContext ctx) {
return ctx.queryParameters().getString("until")
.map(LocalDate::parse)
.map(date -> date.plusDays(1))
.orElseGet(this::tomorrow);
}
private LocalDate tomorrow() {
return LocalDate.now(clock).plusDays(1);
}
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
if (!inspector.field(field).valid())
throw new RestApiException.BadRequest("Field " + field + " cannot be null");
return inspector.field(field).asString();
}
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final Clock clock;
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.clock = controller.serviceRegistry().clock();
}
private Slime tenant(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var plan = billing.getPlan(tenant.name());
var collectionMethod = billing.getCollectionMethod(tenant.name());
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", plan.value());
cursor.setString("collection", collectionMethod.name());
return response;
}
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
cursor.setString("plan", billing.getPlan(tenant.name()).value());
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
var format = requestContext.queryParameters().getString("format").orElse("json");
var invoice = billing.getBillsForTenant(tenant.name()).stream()
.filter(inv -> inv.id().value().equals(invoiceId))
.findAny()
.orElseThrow(RestApiException.NotFound::new);
if (format.equals("json")) {
var slime = new Slime();
toSlime(slime.setObject(), invoice);
return new SlimeJsonResponse(slime);
}
if (format.equals("csv")) {
var csv = toCsv(invoice);
return new CsvResponse(CSV_INVOICE_HEADER, csv);
}
throw new RestApiException.BadRequest("Unknown format: " + format);
}
private boolean tenantHasDeployments(TenantName tenant) {
return applications.asList(tenant).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().size())
.sum() > 0;
}
private Slime tenantUsage(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
usageToSlime(slime.setObject(), usage);
return slime;
}
/**
 * Accountant overview: one entry per known tenant (sorted by name) with its plan,
 * collection method, the start date of its last uncommitted bill, and the unbilled amount.
 */
private Slime accountant(RestApi.RequestContext requestContext) {
    var untilAt = untilParameter(requestContext);
    // Uncommitted bills for all tenants at once; tenants without usage are absent from this map.
    var usagePerTenant = billing.createUncommittedBills(untilAt);
    var response = new Slime();
    var tenantsResponse = response.setObject().setArray("tenants");
    tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
        var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
        var tenantResponse = tenantsResponse.addObject();
        tenantResponse.setString("tenant", tenant.name().value());
        tenantResponse.setString("plan", billing.getPlan(tenant.name()).value());
        tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
        // NOTE(review): orElse(null) passes null into setString when there is no usage — confirm Slime accepts this.
        tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
        tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
    });
    return response;
}
/** Renders a preview of the tenant's next bill, computed from unbilled usage up to the optional "until" date. */
private Slime previewBill(RestApi.RequestContext requestContext) {
    TenantName tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    CloudTenant tenant = tenants.require(tenantName, CloudTenant.class);
    Bill bill = billing.createUncommittedBill(tenant.name(), untilParameter(requestContext));
    Slime response = new Slime();
    toSlime(response.setObject(), bill);
    return response;
}
/**
 * Creates a bill for an explicit period on behalf of the logged-in user.
 * The request body must contain ISO dates in "from" (inclusive) and "to" (inclusive);
 * the period ends at the start of the day after "to", in UTC.
 *
 * @throws RestApiException.Forbidden if no security context is present
 */
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
    Inspector body = slime.get();
    SecurityContext security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
                                             .map(SecurityContext.class::cast)
                                             .orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
    CloudTenant tenant = tenants.require(TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant")),
                                         CloudTenant.class);
    ZonedDateTime periodStart = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
    ZonedDateTime periodEnd = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
    var billId = billing.createBillForPeriod(tenant.name(), periodStart, periodEnd, security.principal().getName());
    return new MessageResponse("Created bill " + billId.value());
}
/** Appends a summary object to the given array cursor for each bill. */
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
    for (Bill bill : bills)
        invoiceSummaryToSlime(slime.addObject(), bill);
}
/** Writes the summary fields of a bill (id, period, total, status) to the given object cursor. */
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
    slime.setString("id", bill.id().value());
    slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
    slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
    slime.setString("total", bill.sum().toString());
    slime.setString("status", bill.status());
}
/** Writes an uncommitted-usage summary: period, total and line items (no id/status, unlike a persisted bill). */
private void usageToSlime(Cursor slime, Bill bill) {
    slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
    // NOTE(review): uses getEndTime() here while the other serializers use getEndDate() — confirm this is intentional.
    slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
    slime.setString("total", bill.sum().toString());
    toSlime(slime.setArray("items"), bill.lineItems());
}
/** Writes the full representation of a bill: summary fields plus its status history and line items. */
private void toSlime(Cursor slime, Bill bill) {
    slime.setString("id", bill.id().value());
    slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
    slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
    slime.setString("total", bill.sum().toString());
    slime.setString("status", bill.status());
    toSlime(slime.setArray("statusHistory"), bill.statusHistory());
    toSlime(slime.setArray("items"), bill.lineItems());
}
/** Appends one { at, status } object per status change of a bill to the given array cursor. */
private void toSlime(Cursor slime, Bill.StatusHistory history) {
    for (var entry : history.getHistory().entrySet()) {
        Cursor statusObject = slime.addObject();
        statusObject.setString("at", entry.getKey().format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
        statusObject.setString("status", entry.getValue());
    }
}
/** Appends one object per line item to the given array cursor. */
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
    for (Bill.LineItem item : items)
        toSlime(slime.addObject(), item);
}
/** Writes one line item: identity, cost, plan, optional application/zone scope, and per-resource usage. */
private void toSlime(Cursor slime, Bill.LineItem item) {
    slime.setString("id", item.id());
    slime.setString("description", item.description());
    slime.setString("amount",item.amount().toString());
    slime.setString("plan", item.plan());
    slime.setString("planName", billing.getPlanDisplayName(PlanId.from(item.plan())));
    // Application and zone are only present for items scoped to a particular deployment.
    item.applicationId().ifPresent(appId -> {
        slime.setString("application", appId.application().value());
        slime.setString("instance", appId.instance().value());
    });
    item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
    // Per-resource breakdown; hours/cost fields are omitted individually when absent.
    toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
    toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
    toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
/** Writes optional "hours" and "cost" fields; absent values are simply omitted. */
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
    hours.map(BigDecimal::toString).ifPresent(value -> slime.setString("hours", value));
    cost.map(BigDecimal::toString).ifPresent(value -> slime.setString("cost", value));
}
/** Returns the single CSV row for a bill; column order must match CSV_INVOICE_HEADER. */
private List<Object[]> toCsv(Bill bill) {
    return List.<Object[]>of(new Object[]{
            bill.id().value(), bill.tenant().value(),
            bill.getStartDate().format(DateTimeFormatter.ISO_DATE),
            bill.getEndDate().format(DateTimeFormatter.ISO_DATE),
            bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
            bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
            bill.sumAdditionalCost()
    });
}
/**
 * Returns the exclusive end of the requested period: the day after the ISO date given
 * in the "until" query parameter, or tomorrow when the parameter is absent.
 */
private LocalDate untilParameter(RestApi.RequestContext ctx) {
    Optional<String> raw = ctx.queryParameters().getString("until");
    if ( ! raw.isPresent()) return tomorrow();
    return LocalDate.parse(raw.get()).plusDays(1);
}
/** Returns the day after the current date, as seen by the injected clock. */
private LocalDate tomorrow() {
    LocalDate today = LocalDate.now(clock);
    return today.plusDays(1);
}
/**
 * Returns the string value of the given field of the inspector.
 *
 * @throws RestApiException.BadRequest if the field is missing or invalid
 */
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
    Inspector value = inspector.field(field); // look the field up once instead of twice
    if ( ! value.valid())
        throw new RestApiException.BadRequest("Field " + field + " cannot be null");
    return value.asString();
}
} |
Maybe this should be part of the common function? Why isn't `validateMergedTenantInfo()` called in the other `put*` methods? | private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
} | return new MessageResponse("Tenant info updated"); | private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Jackson mapper shared by all requests.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Collaborators assigned in the constructor below.
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
/** Creates this handler; dependencies are injected by the container. */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    // Serializer is bound to the system (e.g. main vs cd) this controller runs in.
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Some operations served by this handler may be long-running, so allow a generous timeout. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20); // NOTE(review): confirm the rationale for 20 minutes specifically
}
/**
 * Dispatches the request by HTTP method and translates known exception types
 * into the corresponding HTTP error responses. Catch order is deliberate:
 * specific application exceptions first, RuntimeException as the last resort.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes onto client-facing status codes.
        switch (e.code()) {
            case NOT_FOUND:
                return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Unexpected: log with stack trace, but return only the message to the client.
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Routes GET requests. Matching is first-hit-wins, so more specific paths must be
 * listed before less specific ones. Removed an exact duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" route, which
 * was unreachable dead code (the first occurrence always returned).
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    // Root and tenant-level resources.
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return tenantInfoProfile(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return tenantInfoBilling(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return tenantInfoContacts(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    // Application-level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    // Instance-level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    // Deployment-level resources, instance before environment in the path.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy deployment paths, environment before instance in the path.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests (tenant updates, access control, secret stores, rotation overrides). First match wins. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests (creation, deployment, job triggering, operational actions). First match wins. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level routes use the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    // Legacy deployment paths, environment before instance in the path.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; instance-level PATCH is applied to the owning application. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests to the matching handler. Patterns are checked in order,
 * from tenant-level resources down to deployment-level resources; the first match wins.
 * The last two patterns (environment before instance) are legacy path forms kept for
 * backwards compatibility with the instance-first forms above them.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    // Tenant-level resources.
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    // Application-level resources; deploy cancellations without an instance use "default".
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    // Instance-level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    // Deployment (zone) level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path forms (environment before instance).
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with an empty body and the set of supported HTTP verbs. */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    // Advertise every verb this handler dispatches on.
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/**
 * Renders every tenant, each with its applications, as one JSON array.
 * Deleted tenants are included only when the request asks for them.
 */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        // Only this tenant's applications are serialized under its entry.
        List<Application> tenantApplications = allApplications.stream()
                                                              .filter(application -> application.id().tenant().equals(tenant.name()))
                                                              .collect(toList());
        toSlime(tenantArray.addObject(), tenant, tenantApplications, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: a full recursive listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) as a JSON array of summaries. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    var allTenants = controller.tenants().asList(includeDeleted(request));
    for (var tenant : allTenants) {
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    }
    return new SlimeJsonResponse(slime);
}
/** Serves a single tenant by name, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant, including all its applications, to a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var applications = controller.applications().asList(tenant.name());
    toSlime(slime.setObject(), tenant, applications, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the ssh access-request state for a cloud tenant: whether access is managed,
 * any pending membership request, and the audit log of past decisions.
 * Returns 400 for non-cloud tenants, which have no access-request workflow.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
    var managedAccess = accessControlService.getManagedAccess(tenant);
    var slime = new Slime();
    var cursor = slime.setObject();
    cursor.setBool("managedAccess", managedAccess);
    // "pendingRequest" is only present when there is an undecided request.
    accessRoleInformation.getPendingRequest()
            .ifPresent(membershipRequest -> {
                var requestCursor = cursor.setObject("pendingRequest");
                requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                requestCursor.setString("reason", membershipRequest.getReason());
            });
    // "auditLog" is always present, possibly empty.
    var auditLogCursor = cursor.setArray("auditLog");
    accessRoleInformation.getAuditLog()
            .forEach(auditLogEntry -> {
                var entryCursor = auditLogCursor.addObject();
                entryCursor.setString("created", auditLogEntry.getCreationTime());
                entryCursor.setString("approver", auditLogEntry.getApprover());
                entryCursor.setString("reason", auditLogEntry.getReason());
                entryCursor.setString("status", auditLogEntry.getAction());
            });
    return new SlimeJsonResponse(slime);
}
/**
 * Files an ssh access request for a cloud tenant on behalf of an operator.
 * Only operators may request access, and only for cloud tenants.
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * The request body may carry "expiry" (epoch millis); when absent the granted
 * access defaults to one day from now. "approve" decides grant vs. rejection.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    // Default expiry: 24 hours from now.
    var expiry = inspector.field("expiry").valid() ?
            Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
            Instant.now().plus(1, ChronoUnit.DAYS);
    var approve = inspector.field("approve").asBool();
    // The decision is attributed to the caller via their OAuth credentials.
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed access control for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables managed access control for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Sets the managed-access flag for a cloud tenant and echoes the new value.
 * Returns 400 for non-cloud tenants, which do not support managed access.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        // Fixed typo in user-facing message: "privel" -> "privileges".
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants");
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Serves the (legacy, flat) tenant info for a cloud tenant, or 404 otherwise. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/** Serves the profile section of a cloud tenant's info, or 404 otherwise. */
private HttpResponse tenantInfoProfile(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfoProfile((CloudTenant) tenant.get());
}
/** Serves the billing section of a cloud tenant's info, or 404 otherwise. */
private HttpResponse tenantInfoBilling(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfoBilling((CloudTenant) tenant.get());
}
/** Serves the contacts section of a cloud tenant's info, or 404 otherwise. */
private HttpResponse tenantInfoContacts(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfoContacts((CloudTenant) tenant.get());
}
/**
 * Serializes the full (legacy, flat) tenant info view.
 * An empty info yields an empty JSON object rather than empty fields.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor infoCursor = slime.setObject();
    if (!info.isEmpty()) {
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("contactName", info.contact().name());
        infoCursor.setString("contactEmail", info.contact().email());
        // Nested sections: address, billingContact, contacts.
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
        toSlime(info.contacts(), infoCursor);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the profile view of a cloud tenant's info: the contact person,
 * the company ("tenant" object), and the address.
 * An empty info yields an empty JSON object.
 */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var contact = root.setObject("contact");
        contact.setString("name", info.contact().name());
        contact.setString("email", info.contact().email());
        // Note: info.name() is the company name in this view.
        var tenant = root.setObject("tenant");
        tenant.setString("company", info.name());
        tenant.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Applies {@code handler} to the named tenant and the parsed request body,
 * or returns 404 when the tenant does not exist.
 */
private SlimeJsonResponse putTenantInfo(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    Inspector requestBody = toSlime(request.getData()).get();
    return handler.apply((CloudTenant) tenant.get(), requestBody);
}
/**
 * Serializes the billing view of a cloud tenant's info: the billing contact
 * person and the billing address. An empty info yields an empty JSON object.
 */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var billingContact = info.billingContact();
        var contact = root.setObject("contact");
        contact.setString("name", billingContact.contact().name());
        contact.setString("email", billingContact.contact().email());
        contact.setString("phone", billingContact.contact().phone());
        // Address is written at the top level, next to "contact".
        toSlime(billingContact.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Merges the billing contact and address from the request body into the tenant's
 * stored info and persists the result. Fields absent from the body keep their
 * previous values (merge semantics of updateTenantInfoContact/Address).
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    // Fixed: the old code declared an unused 'address' local and re-fetched
    // info.billingContact().address() instead of using it.
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), info.billingContact().contact());
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    // Persist under the tenant lock to avoid lost updates.
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Serializes only the contacts section of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}
/**
 * Replaces the tenant's contacts with the merged result of the request body
 * and the stored contacts, persisting under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantContacts mergedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    TenantInfo mergedInfo = cloudTenant.info().withContacts(mergedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates a merged tenant info before it is stored.
 *
 * @throws IllegalArgumentException when the contact name or email is blank,
 *         the email is not an address, or a non-blank website is not a valid URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    TenantContact contact = mergedInfo.contact();
    if (contact.name().isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contact.email().isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if (!contact.email().contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    String website = mergedInfo.website();
    if (website.isBlank()) return; // an empty website is allowed
    try {
        new URL(website);
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
/** Writes a non-empty address as an "address" object under the given cursor; empty addresses are skipped. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes a non-empty billing contact as a "billingContact" object, including its address; empty ones are skipped. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    TenantContact contact = billingContact.contact();
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", contact.name());
    cursor.setString("email", contact.email());
    cursor.setString("phone", contact.phone());
    toSlime(billingContact.address(), cursor);
}
/**
 * Writes all tenant contacts as a "contacts" array (always present, possibly empty).
 * Each contact carries its audiences and type-specific fields; only EMAIL contacts
 * are currently serializable.
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL:
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email());
                return; // returns from this lambda only; continues with the next contact
            default:
                throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/**
 * Parses a wire-format audience string into the enum.
 * Inverse of {@code toAudience}.
 *
 * @throws IllegalArgumentException for unknown values
 */
private static TenantContacts.Audience fromAudience(String value) {
    switch (value) {
        case "tenant": return TenantContacts.Audience.TENANT;
        case "notifications": return TenantContacts.Audience.NOTIFICATIONS;
        default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    }
}
/**
 * Serializes an audience enum to its wire-format string.
 * Inverse of {@code fromAudience}.
 *
 * @throws IllegalArgumentException for unmapped enum constants
 */
private static String toAudience(TenantContacts.Audience audience) {
    switch (audience) {
        case TENANT: return "tenant";
        case NOTIFICATIONS: return "notifications";
        default: throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
    }
}
/** Updates a cloud tenant's info from the request body, or 404 for missing/non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/** Returns the trimmed string value of {@code field} when present, otherwise {@code defaultValue}. */
private String getString(Inspector field, String defaultValue) {
    // Fixed parameter-name typo: "defaultVale" -> "defaultValue".
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the request body into the tenant's stored info (fields absent from the
 * body keep their old values), validates the result, and persists it under the
 * tenant lock.
 *
 * @throws IllegalArgumentException when the merged info fails validation
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    // Each field falls back to the stored value when missing from the request.
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    validateMergedTenantInfo(mergedInfo);
    // Persist under the tenant lock to avoid lost updates.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges an address from the request with the stored address; missing fields keep
 * their old values. The merged address must be either completely blank or completely
 * filled — a partially filled address is rejected.
 *
 * @throws IllegalArgumentException when some but not all address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    TenantAddress address = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(address.address(),
                                  address.code(),
                                  address.country(),
                                  address.city(),
                                  address.region());
    // All blank (no address) or none blank (complete address) are the only valid states.
    if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
        return address;
    throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges a contact from the request with the stored contact; missing fields keep
 * their old values. A non-blank merged email must look like an email address.
 *
 * @throws IllegalArgumentException when the merged email is non-blank but not an address
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the validated value instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges a billing contact (person and address) from the request with the stored one. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if (!insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
}
/**
 * Parses the "contacts" array from the request into a new contact list, replacing
 * (not merging with) the stored contacts when the field is present.
 *
 * @throws IllegalArgumentException when an entry's email is not an address,
 *         or an audience string is unknown
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .collect(Collectors.toUnmodifiableList());
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        // Only email contacts are supported on this path.
        return new TenantContacts.EmailContact(audiences, email);
    }).collect(toUnmodifiableList());
    return new TenantContacts(contacts);
}
/**
 * Lists notifications, optionally restricted to one tenant, filtered by any of the
 * request properties "application", "instance", "zone", "job", "type" and "level".
 * A filter only excludes a notification when the notification has a value for that
 * dimension and it differs from the requested one (see {@code propertyEquals}).
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    // Either the single requested tenant, or every tenant that has notifications.
    tenant.map(t -> Stream.of(TenantName.from(t)))
          .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
          .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
          .filter(notification ->
                  propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                  propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                  propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                  propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                  propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                  propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
          .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request does not specify {@code property}, or when
 * {@code value} is present and equals the mapped property value. A specified
 * property with an absent value yields false.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true; // no filter on this dimension
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
/**
 * Serializes one notification. Source dimensions (application, instance, zone,
 * cluster, job, run) are written only when present; messages can be suppressed
 * and the tenant field is only included for cross-tenant listings.
 */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    // Optional source dimensions below; absent ones are omitted from the output.
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
    notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region", zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/**
 * Maps a notification type to its wire-format string.
 * Note: {@code submission} intentionally falls through to "applicationPackage",
 * so both types serialize identically.
 *
 * @throws IllegalArgumentException for unmapped types
 */
private static String notificationTypeAsString(Notification.Type type) {
    switch (type) {
        case submission: // deliberate fall-through
        case applicationPackage: return "applicationPackage";
        case testPackage: return "testPackage";
        case deployment: return "deployment";
        case feedBlock: return "feedBlock";
        case reindex: return "reindex";
        default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
    }
}
/**
 * Maps a notification level to its wire-format string.
 *
 * @throws IllegalArgumentException for unmapped levels
 */
private static String notificationLevelAsString(Notification.Level level) {
    switch (level) {
        case info: return "info";
        case warning: return "warning";
        case error: return "error";
        default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
    }
}
/**
 * Lists applications for a tenant (all of them, or the single named one), each with
 * its instances and API URLs. Instances can be restricted to production only via
 * the request.
 *
 * @throws NotExistsException when a named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // 404s early when the tenant is missing
    List<Application> applications = applicationName.isEmpty() ?
            controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                      .map(List::of)
                      .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the application package of the last run of the given dev/perf job as a zip.
 *
 * @throws NotExistsException when no run of the job exists for the application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Replaced unchecked Optional.get() with a descriptive error for the "no runs yet" case,
    // so clients get a 404-style error instead of an internal NoSuchElementException.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("No run of " + type.jobName() + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/**
 * Serves the stored application-package diff for the given dev run.
 *
 * @throws NotExistsException when no diff is stored for the run
 */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    var deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Serves a previously submitted application package (or its tester package, with
 * the "tests" property) as a zip. The build is the "build" request parameter,
 * defaulting to the latest submission.
 *
 * @throws IllegalArgumentException when "build" is not a positive number
 * @throws NotExistsException when no package has ever been submitted
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long build;
    String parameter = request.getProperty("build");
    if (parameter != null) {
        try {
            // Fixed: parse the already-read 'parameter' instead of fetching the property a second time.
            build = Validation.requireAtLeast(Long.parseLong(parameter), "build number", 1L);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
        }
    }
    else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Serves the stored application-package diff for the given build number.
 *
 * @throws NotExistsException when no diff is stored for that build
 * @throws NumberFormatException when {@code number} is not a long
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    var tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    var diff = controller.applications().applicationStore()
                         .getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number));
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number);
    return new ByteArrayResponse(diff.get());
}
/** Serializes a single application to a JSON response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var slime = new Slime();
    var application = getApplication(tenantName, applicationName);
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the compile version for an application, optionally constrained by an
 * "allowMajor" parameter.
 *
 * @throws IllegalArgumentException when the major-version parameter is not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications()
                                       .compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serializes a single instance, together with the application's deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    var slime = new Slime();
    var instance = getInstance(tenantName, applicationName, instanceName);
    var status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM-encoded public key in the request body as a developer key for
 * the calling user, and returns the tenant's resulting key set.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Mutate and serialize inside the lock so the response reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Asks the config server of the given deployment to validate a tenant secret store
 * against an AWS region/parameter, and wraps the config server's JSON answer.
 * Returns the raw config server response as a 500 when it is not valid JSON.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = ZoneId.from(request.getProperty("zone"));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    // NOTE(review): the lookup uses applicationId.tenant() while the error message uses
    // the tenantName path parameter — confirm these are expected to always agree.
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server answered with non-JSON; pass the raw text through as an error.
        return ErrorResponse.internalServerError(response);
    }
}
/**
 * Removes a developer key from a cloud tenant. The key is read as a PEM-encoded public key
 * from the "key" field of the request body. Returns the tenant's remaining key set.
 * Removal of a key which is not registered is a no-op.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Fix: removed an unused Principal lookup of the key's owner; it had no effect
    // (its result was never read, and a missing key did not cause a failure).
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each developer key with its owning user into the given array cursor. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds a deploy key, given as a PEM-encoded public key in the "key" field of the request body,
 * to the application. Returns the application's full deploy key set after the addition.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes a deploy key, given as a PEM-encoded public key in the "key" field of the request body,
 * from the application. Returns the application's remaining deploy key set.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Configures a new tenant secret store (an AWS role the tenant's deployments may assume to
 * read secrets). Registers the policy and store with external services before persisting the
 * store on the tenant. Returns the tenant's full secret store list after the addition.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    // Secret stores are a cloud-tenant-only concept.
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    // External side effects happen before the tenant lock is taken; if persisting below fails,
    // the policy and store may be left registered externally without a matching tenant entry.
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes a tenant secret store by name, unregistering it from external services before
 * removing it from the tenant. Returns the tenant's remaining secret stores, or a 404 if
 * no store with the given name is configured.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    // External cleanup first, mirroring the registration order in addSecretStore.
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Sets the archive access role for a cloud tenant, read from the mandatory "role" field of
 * the request body. A blank role is rejected with a 400.
 */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.of(role))));
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Clears the archive access role of a cloud tenant. */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Patches application-level settings. Supported fields in the request body:
 * "majorVersion" (0 clears the pinned major) and "pemDeployKey" (adds a deploy key).
 * Absent fields are left untouched; the response message lists what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel for "unpin the major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Looks up the application, failing with a NotExistsException when it is unknown. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return application.get();
}
/** Looks up the instance, failing with a NotExistsException when it is unknown. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
/**
 * Lists the nodes allocated to the given instance in the given zone, with state, version,
 * resources and cluster membership, as a {"nodes": [...]} document.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // "retired" is set both for already retired nodes and those flagged for retirement.
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // A pending restart/reboot is signalled by the wanted generation being ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the clusters of the given deployment with their resource limits, current and
 * (where relevant) target/suggested resources, utilization and scaling history,
 * as a {"clusters": {...}} document keyed by cluster id.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only render a target when it differs (in numbers) from the current resources.
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API string; unexpected states are rejected. */
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Maps an orchestration state to its API string; anything unrecognized reads as "unknown". */
static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.permanentlyDown) return "permanentlyDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return "unknown";
}
/** Maps a cluster type to its API string; unexpected types are rejected. */
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
/** Maps a disk speed to its API string; unknown values are rejected. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
/** Maps a storage type to its API string; unknown values are rejected. */
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
/**
 * Streams logs for the given deployment directly from the config server to the client.
 * Query parameters are passed through unchanged (e.g. filtering and time range).
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // The stream is opened here but consumed lazily when the response is rendered;
    // try-with-resources in render() guarantees it is closed after the transfer.
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Renders the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(id, requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, attributed to the caller. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    String grantedBy = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), grantedBy);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/**
 * Revokes support access to the given deployment, attributed to the caller, and queues a
 * re-deployment so the revocation takes effect on the running configuration.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Fix: reuse the already validated principal rather than re-reading it from the request;
    // the previous direct getUserPrincipal() call bypassed requireUserPrincipal's null check.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns the proton (content node) metrics of the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the per-node proton metrics in a {"metrics": [...]} JSON document. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics nodeMetrics : protonMetrics)
            metricsArray.add(nodeMetrics.toJson());
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        // Serialization failure is unexpected; log it and return an empty 500.
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance. With "reTrigger" set, the last run of the
 * job is repeated; otherwise a force-trigger may start one or more jobs. "skipTests",
 * "skipRevision" and "skipUpgrade" optionally narrow what is run or upgraded.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    // All flags are optional in the request body; absent fields read as false.
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds message suffixes like ", without revision upgrade" or
    // ", without revision and platform upgrade", empty when nothing is skipped.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant expiry = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, expiry);
    String message = type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause;
    return new MessageResponse(message);
}
/** Clears any pause on the given job, letting it be triggered again. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an application to the given cursor: identity, latest submitted revision,
 * current and outstanding changes, its instances (optionally only production ones),
 * deploy keys, metrics, activity, and issue/ownership references.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // "deploying"/"outstandingChange" are taken from the first instance only here;
    // per-instance details are rendered in the instances array below.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance (as part of its application's listing): name, current and
 * outstanding changes, change blockers from the deployment spec, rotation id, and its
 * deployments — recursively when requested, otherwise as links.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Fix: removed an unused computation of the instance's sorted job statuses here;
        // its result was never read.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments are ordered by the spec's deployment steps when the instance is declared there.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the first assigned rotation id, if any; instances may have no rotations. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes a single instance with full application context: identity, latest revision
 * metadata, current and outstanding changes, change blockers, rotation, its deployments
 * (recursively when requested), not-yet-present expected deployment zones, deploy keys,
 * metrics, activity, and issue/ownership references.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Fix: removed an unused computation of the instance's sorted job statuses here;
        // its result was never read.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Deployments are ordered by the spec's deployment steps when the instance is declared there.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones where a deployment is expected (production steps) or in flight
    // (active manual runs) but not yet present.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Renders a single deployment of an instance, failing with 404 semantics when missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: "version" for a platform upgrade, "revision" for an application change. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                 application.revisions().get(revision)));
}
/** Serializes one endpoint to {@code object}: cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes a deployment to {@code response}: identifiers, endpoints, versions,
 * deploy/expiry times, job status, quota, cost, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Endpoints: zone-scoped endpoints first, then declared endpoints targeting this deployment.
    // Legacy and non-direct endpoints are excluded unless 'includeLegacyEndpoints' is set.
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }

    // Links to related resources for this deployment.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());

    // Versions and deployment time; expiry is only set for zones with a deployment TTL.
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Global rotation status is only relevant for prod deployments of instances with rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Pipeline-managed deployment: derive status from the deployment job step.
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        // "complete" when no further runs are scheduled for this job, "pending" when
                        // a run is not yet ready to start, and "running" otherwise.
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                            response.setString("status", "pending");
                        else response.setString("status", "running");
                    });
        } else {
            // Manually deployed zone: status is simply whether the last deployment run has ended.
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }

    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    // Activity timestamps/rates are only set when known.
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the BCP (rotation) status of a deployment to {@code object}. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Writes one status entry per assigned rotation of the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var statusArray = object.setArray("endpointStatus");
    for (var assigned : rotations) {
        var entry = statusArray.addObject();
        var targets = status.of(assigned.rotationId());
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Resolves the monitoring-system URI for the given deployment via the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the global routing status of a deployment in or out of service, attributed to operator or tenant. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId instanceId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(instanceId);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value newStatus = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(newStatus, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override (routing status) for the primary rotation endpoint
 * of a deployment, or an empty array if the instance has no rotation endpoints.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        // NOTE: legacy response shape — the array alternates a bare upstream-name string
        // followed by a status object, rather than using keyed objects.
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", "");  // always empty; kept for response compatibility
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of a deployment, for the rotation matching the optional endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId instanceId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(instanceId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotationId = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotationId, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the ongoing change (platform and/or application revision) of an instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a /status page request to the given service node in the config server. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deployment,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          query);
}
/** Returns the service nodes of a deployment, as reported by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId instanceId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(instanceId, requireZone(environment, region)));
}
/** Proxies a /state/v1 request to the given service node, recording the original URL as 'forwarded-url'. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns application package content for a deployment, fetched from the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deployment, restPath, request.getUri());
}
/**
 * Updates an existing tenant from the request body and returns the updated tenant.
 * Fails fast (404) if the tenant does not exist, instead of creating it implicitly.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a tenant from the request body and returns it. In public systems the
 * authenticated user is additionally recorded as the tenant's contact.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Record the creating user as the initial tenant contact.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    // Reuse the already-parsed tenant name instead of re-parsing the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates an application (without instances) and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The created Application is not needed here; the response is built from the id alone.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance, first creating the enclosing application if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // Validation and triggering happen under the application lock; the response message
    // is accumulated in the StringBuilder captured by the lambda.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Refuse versions not active in this system, listing the valid alternatives.
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1;
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // No build given: use the latest known revision; otherwise look up the requested build.
        RevisionId revision;
        if (build == -1)
            revision = application.get().revisions().last().get().id();
        else
            revision = getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Looks up the revision matching the given build number, requiring that its package
 * is still present in the application store.
 */
private RevisionId getRevision(Application application, long build) {
    for (ApplicationVersion version : application.revisions().withPackage()) {
        if (version.id().number() != build) continue;
        // Found the matching revision; it is only valid if its package is still stored.
        if (controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                  application.id().application(),
                                                                  build))
            return version.id();
        break;
    }
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as skipped, so it will not be deployed. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application ->
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))));
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change existing = application.get().require(id.instance()).change();
        if (existing.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(existing)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // 'clusterId' and 'documentType' are optional comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    // Optional reindexing speed; null means "use the default".
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    // Response message only mentions the filters which were actually given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // Clusters, and the pending/ready entries within each, are emitted in sorted key order
    // so the response is stable across calls.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types with reindexing pending, and the config generation requiring it.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types with reindexing ready, with their detailed status.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the fields of a reindexing status which are present to {@code statusObject}. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
    status.speed().ifPresent(value -> statusObject.setDouble("speed", value));
}
/**
 * Maps a reindexing state to its lower-case API name.
 * Returns null for unknown states, which callers using Optional.map then treat as absent.
 */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
        default: return null;
    }
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Each filter property is optional; absent properties leave the filter unrestricted.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deployment, new RestartFilter().withHostName(hostName)
                                                                     .withClusterType(clusterType)
                                                                     .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deployment);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deployment);
}
/**
 * Deploys an application package directly through the job controller.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for the form part name so the presence check and the
    // lookup below cannot drift apart (previously one used a literal, the other the constant).
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' JSON part once, instead of once per field.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application package to a zone. Regular applications are rejected here;
 * so are explicit versions, deployments during a system upgrade, and deployments before
 * the system version is known.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // This endpoint only serves system applications which carry an application package.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }

    // Explicit versions are rejected; system applications always deploy the system version.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }

    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes a tenant; the 'forget' option (hard delete) is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes an application and all its instances. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/**
 * Deletes an instance, and the enclosing application as well if this was its last instance.
 * Credentials are only read from the request when the application itself is deleted.
 */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Cascade: remove the application when its last instance is gone.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates a deployment, and aborts any still-running deployment job for that zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // Abort an unfinished deployment job, attributing the abort to the calling user.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the given instance is not in the deployment spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ZoneId testedZone = type.zone();
    // Non-production jobs additionally test against the zone being deployed to.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Creates a service dump request for the given node by writing a "serviceDump" report to the
 * node repository. Rejects the request if a dump is already in progress, unless 'force' is set.
 * If 'wait' is set, blocks until the dump completes or fails and returns the final report.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
if (report != null) {
Cursor cursor = report.get();
// A dump with neither failedAt nor completedAt set is still in progress.
boolean force = request.getBooleanProperty("force");
if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
}
}
Slime requestPayload;
try {
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
} catch (Exception e) {
throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
}
Cursor requestPayloadCursor = requestPayload.get();
String configId = requestPayloadCursor.field("configId").asString();
long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
if (configId.isEmpty()) {
throw new IllegalArgumentException("Missing configId");
}
Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
int artifactEntries = artifactsCursor.entries();
if (artifactEntries == 0) {
throw new IllegalArgumentException("Missing or empty 'artifacts'");
}
// Build the dump request document stored as the node's "serviceDump" report.
Slime dumpRequest = new Slime();
Cursor dumpRequestCursor = dumpRequest.setObject();
dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
dumpRequestCursor.setString("configId", configId);
Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
for (int i = 0; i < artifactEntries; i++) {
dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
}
if (expiresAt > 0) {
dumpRequestCursor.setLong("expiresAt", expiresAt);
}
// Optional pass-through of dump options from the request payload.
Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
if (dumpOptionsCursor.children() > 0) {
SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
}
// NOTE(review): new String(byte[]) uses the platform default charset; JSON bytes are
// presumably UTF-8 — consider new String(bytes, StandardCharsets.UTF_8). Verify.
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
nodeRepository.updateReports(zone, hostname, reportsUpdate);
boolean wait = request.getBooleanProperty("wait");
if (!wait) return new MessageResponse("Request created");
return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current "serviceDump" report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
.orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
return new SlimeJsonResponse(report);
}
/**
 * Polls the node's service dump report until it has either completed or failed, then returns it.
 * Blocks the request thread between polls; the overall wait is bounded by the handler timeout.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        // The report existed when the dump was requested; fail with a clear message instead of an
        // unchecked Optional.get() if it has disappeared in the meantime.
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("Service dump report disappeared for node " + hostname));
        Cursor cursor = report.get();
        // Either timestamp being set means the dump has terminated.
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Returns the "serviceDump" report of the given node, parsed as Slime, or empty if the node has none.
 *
 * @throws NotExistsException if the node is unknown
 * @throws IllegalArgumentException if the node has no owner, or is not owned by the given application
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
String application, String instance, String hostname) {
Node node;
try {
node = nodeRepository.getNode(zone, hostname);
} catch (IllegalArgumentException e) {
throw new NotExistsException(hostname);
}
// Only the owning application may inspect the node's reports.
ApplicationId app = ApplicationId.from(tenant, application, instance);
ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
if (!app.equals(owner)) {
throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
}
String json = node.reports().get("serviceDump");
if (json == null) return Optional.empty();
return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/**
 * Parses a source revision from the given JSON object.
 *
 * @throws IllegalArgumentException unless "repository", "branch" and "commit" are all present
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().get(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes the given tenant with its applications to the given Slime object.
 * The serialized fields depend on the tenant type (athenz, cloud, deleted), and the
 * application listing honors the request's "recursive", "production" and
 * "activeInstances" query parameters.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants carry domain/property info and optional contact data.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants carry developer keys, secret stores, quota and archive access info.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota lookup is best-effort: failures are logged, not propagated to the response.
try {
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = null;
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
// Applications without (matching) instances are still listed, unless only active instances are requested.
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
// Deployment status is expensive; compute it lazily, once per application.
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes the tenant's quota and its current usage to the given Slime object. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    // A missing budget is serialized as an explicit nix value.
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}
/** Serializes the given cluster resources, including their computed cost, to the given Slime object. */
private void toSlime(ClusterResources resources, Cursor object) {
object.setLong("nodes", resources.nodes());
object.setLong("groups", resources.groups());
toSlime(resources.nodeResources(), object.setObject("nodeResources"));
// Cost depends on the system (e.g. pricing differs between systems).
double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system());
object.setDouble("cost", cost);
}
/** Serializes cluster utilization (actual, ideal and current, for cpu/memory/disk) to the given Slime object. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("idealCpu", utilization.idealCpu());
utilizationObject.setDouble("currentCpu", utilization.currentCpu());
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("idealMemory", utilization.idealMemory());
utilizationObject.setDouble("currentMemory", utilization.currentMemory());
utilizationObject.setDouble("disk", utilization.disk());
utilizationObject.setDouble("idealDisk", utilization.idealDisk());
utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes the given autoscaling events (from/to resources, start time and optional completion) to the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
Cursor scalingEventObject = scalingEventsArray.addObject();
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
}
}
/** Serializes the given node resources to the given Slime object. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a short summary of the given tenant (name, type metadata, and its API URL) for the tenants listing. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant metadata: creation (and, for deleted tenants, deletion) time, the latest
 * dev deployment and prod submission timestamps, and last-login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Latest dev deployment: first look at active dev deployments; if none, fall back to the
// start time of the most recent dev job run.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Latest submission: build time of the last submitted revision across all applications.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to the given query. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// Inputs come from an already-parsed URI, so re-assembly cannot produce invalid syntax.
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path (query removed). */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
/**
 * Reads the given stream (up to 1 MB) and parses the content as JSON into a Slime tree.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause — a bare RuntimeException() discards all context about the failure.
        throw new RuntimeException("Failed reading request content", e);
    }
}
/**
 * Returns the user principal of the given request.
 *
 * @throws InternalServerErrorException if the request has no user principal
 */
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
/**
 * Returns the field with the given key from the given object.
 *
 * @throws IllegalArgumentException if the field is missing or invalid
 */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the field with the given key, or empty if the field is absent. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/' to form a path. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes the given application id (tenant + application, no instance) with its API URL to the given Slime object. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
/** Serializes the given instance id (tenant + application + instance) with its API URL to the given Slime object. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
/**
 * Serializes the result of a deployment activation: revision id, package size,
 * prepare log messages, and the config change actions (restart and refeed).
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Log messages emitted during config server prepare, if any.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services that must be restarted for the config change to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types that must be re-fed for the config change to take effect.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes the given service infos (name, type, config id and host) to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
/** Adds each of the given strings to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given tenant secret stores to a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStore.addObject(), store);
}
/** Serializes the tenant's container role and its secret store accounts to the given Slime object. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
object.setString("tenantRole", tenantRoles.containerRole());
var stores = object.setArray("accounts");
tenantSecretStores.forEach(secretStore -> {
toSlime(stores.addObject(), secretStore);
});
}
/** Serializes the given secret store (name, AWS account id and role) to the given Slime object. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a string, or returns null if the stream is empty.
 * The "\\A" delimiter makes the scanner consume the whole input as one token.
 * NOTE(review): Scanner uses the platform default charset here — should likely be UTF-8; confirm.
 * NOTE(review): the Scanner (and thus the stream) is intentionally(?) left open for the caller — verify.
 */
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the request asks for recursion down to, or below, the tenant level. */
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to, or below, the application level. */
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to the deployment level ("all", "true" or "deployment"). */
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the request asks for production instances only (query parameter production=true). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns whether the request asks for instances with deployments only (query parameter activeInstances=true). */
private static boolean showOnlyActiveInstances(HttpRequest request) {
return "true".equals(request.getProperty("activeInstances"));
}
/** Returns whether the request asks for deleted entities to be included (query parameter includeDeleted=true). */
private static boolean includeDeleted(HttpRequest request) {
return "true".equals(request.getProperty("includeDeleted"));
}
/** Returns the API string form of the given tenant's type. */
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
case deleted: return "DELETED";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
/** Builds an application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type named by the "jobtype" path segment against this system's zones. */
private JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Builds a run id from the application, job type and "number" path segments. */
private RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles submission of a new application revision: parses the multipart request
 * (submit options, application package and test package), validates identity
 * configuration, creates the application if needed, and registers the submission.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong();
// A missing/zero project id defaults to 1.
projectId = projectId == 0 ? 1 : projectId;
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when all three of repository, branch and commit are given.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Removes all production deployments of the given application by submitting a deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
TenantAndApplicationId.from(tenant, application),
new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), 0),
0);
return new MessageResponse("All deployments removed");
}
/**
 * Parses the given environment and region as a zone id and verifies it exists in this system.
 *
 * @throws IllegalArgumentException if the zone is not known to the zone registry
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The synthetic prod "controller" zone is accepted without being present in the zone registry.
    boolean controllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! controllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body into its named parts. When an X-Content-Hash header
 * is present, the SHA-256 digest of the body is computed while parsing and must match
 * the base64-decoded header value.
 *
 * @throws IllegalArgumentException if the computed hash does not match the header
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body as it is consumed by the parser, then compare against the declared hash.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Returns the rotation id for the given instance. When an endpoint id is given, the rotation
 * assigned to that endpoint is returned; otherwise the instance must have exactly one rotation.
 *
 * @throws NotExistsException if the instance has no rotations, or no rotation for the given endpoint
 * @throws IllegalArgumentException if no endpoint id is given and the instance has multiple rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
/** Returns the API string form of the given rotation state; states other than in/out serialize as "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/**
 * Returns the API string form of the given endpoint scope.
 *
 * @throws IllegalArgumentException for a scope not covered by the switch
 */
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case weighted: return "weighted";
case application: return "application";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/**
 * Returns the API string form of the given routing method.
 *
 * @throws IllegalArgumentException for a method not covered by the switch
 */
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request context attribute with the given name, cast to the given class.
 *
 * @throws IllegalArgumentException if the attribute is absent or of the wrong type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether the given request is made by a hosted operator, based on its security context roles. */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
/** Creates the given application if it does not already exist (used on public systems, where credentials are not required). */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
if (controller.applications().getApplication(id).isEmpty()) {
log.fine("Application does not exist in public, creating: " + id);
var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
controller.applications().createApplication(id, credentials);
}
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
// Wires the handler to the controller, the access control layer, and a test config serializer for this system.
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
// Long timeout: some requests (e.g. waiting for a service dump) block until completion.
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
/**
 * Dispatches the request by HTTP method and translates thrown exceptions into
 * the corresponding HTTP error responses.
 */
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
// Map config server error codes onto HTTP statuses; unknown codes become 400.
switch (e.code()) {
case NOT_FOUND:
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
// Unexpected errors are logged with stack trace and returned as 500.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests. Patterns are matched in declaration order, so more specific
 * paths must precede the generic fallbacks. Fix: removed an exact duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" deployment
 * route — the second occurrence could never match and was dead code.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy path layout with environment/region before instance:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PUT requests. Patterns are matched in declaration order.
 * Note: "archive-access" (no suffix) is a legacy alias for "archive-access/aws".
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    // Legacy path layout with environment/region before instance:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests. Patterns are matched in declaration order.
 * Application-level routes without an instance segment implicitly target the "default" instance.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    // Legacy path layout with environment/region before instance:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests: both the application and the instance resource accept the same patch. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (   path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests. Patterns are matched in declaration order.
 * Note: "archive-access" (no suffix) is a legacy alias for "archive-access/aws";
 * DELETE on ".../job/{jobtype}/pause" resumes a paused job.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path layout with environment/region before instance:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS by advertising the verbs this handler dispatches on. */
private HttpResponse handleOPTIONS() {
    var allowed = new EmptyResponse();
    allowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowed;
}
/** Renders every (possibly deleted, per request) tenant with its applications as one JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    // Fetch the full application list once, then partition it per tenant.
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Lists tenant resources, recursing into each tenant when the request asks for it. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all (possibly deleted, per request) tenants in short form. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders a single tenant by name, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    return controller.tenants().get(name, includeDeleted(request))
                     .map(tenant -> tenant(tenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
/** Renders the given tenant with all its applications. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the operator access-request state for a cloud tenant: whether access is
 * managed, any pending membership request, and the audit log of past decisions.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");

    var accessControlService = controller.serviceRegistry().accessControlService();
    var roleInformation = accessControlService.getAccessRoleInformation(tenant);
    boolean managed = accessControlService.getManagedAccess(tenant);

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("managedAccess", managed);
    roleInformation.getPendingRequest().ifPresent(pending -> {
        Cursor pendingCursor = root.setObject("pendingRequest");
        pendingCursor.setString("requestTime", pending.getCreationTime());
        pendingCursor.setString("reason", pending.getReason());
    });
    Cursor logCursor = root.setArray("auditLog");
    roleInformation.getAuditLog().forEach(entry -> {
        Cursor entryCursor = logCursor.addObject();
        entryCursor.setString("created", entry.getCreationTime());
        entryCursor.setString("approver", entry.getApprover());
        entryCursor.setString("reason", entry.getReason());
        entryCursor.setString("status", entry.getAction());
    });
    return new SlimeJsonResponse(slime);
}
/** Requests ssh access to a cloud tenant; only operators may do this. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * The request body may carry an epoch-millis "expiry"; otherwise access
 * defaults to expiring one day from now.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector inspector = toSlime(request.getData()).get();
    Instant expiry;
    if (inspector.field("expiry").valid())
        expiry = Instant.ofEpochMilli(inspector.field("expiry").asLong());
    else
        expiry = Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService()
              .decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables operator-managed access for the tenant (PUT .../access/managed/operator). */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables operator-managed access for the tenant (DELETE .../access/managed/operator). */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Sets the managed-access flag for a cloud tenant and echoes the new value.
 * Fix: corrected the typo "access privel" in the user-facing error message.
 *
 * @param tenantName    name of the tenant to update
 * @param managedAccess the new flag value
 * @return the updated flag as JSON, or 400 for non-cloud tenants
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants");
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    Slime slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the stored tenant info for a cloud tenant, or 404 otherwise.
 * Consistency: delegates the cloud-tenant lookup and type filtering to the
 * withCloudTenant helper instead of duplicating the same Optional chain.
 */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return withCloudTenant(tenantName, tenant -> tenantInfo(tenant.info(), request));
}
// Applies the given handler to the named tenant if it exists and is a cloud
// tenant; responds 404 otherwise. Shared lookup helper for cloud-only endpoints.
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> handler.apply((CloudTenant) tenant))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Serializes the given tenant info to JSON; an empty info yields an empty
// object. NOTE(review): field insertion order shapes the serialized response —
// confirm before reordering.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email());
// Nested structures are written by the dedicated toSlime overloads below.
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
// Serializes the "profile" view of a cloud tenant's info: contact person,
// company details and address. Empty info yields an empty object.
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email());
var tenant = root.setObject("tenant");
// The stored info "name" is exposed as the company name in this view.
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
// Applies the given handler to the named cloud tenant and the parsed request
// body; responds 404 when the tenant is missing or not a cloud tenant.
// The cloud-type filter matches the sibling overload above and prevents a
// ClassCastException (500) when a non-cloud tenant is addressed.
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Serializes the "billing" view of a cloud tenant's info: billing contact
// and billing address. Empty info yields an empty object.
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
// Replaces the billing contact and address of a cloud tenant with fields
// merged from the request body, then persists the updated info under the
// tenant lock.
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address); // reuse the local instead of re-reading (was unused)
    var mergedBilling = info.billingContact()
                            .withContact(mergedContact)
                            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
// Serializes the contact list of the given cloud tenant to JSON.
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}
// Replaces the tenant's contacts with those parsed from the request body
// (merged by updateTenantInfoContacts), then persists under the tenant lock.
// NOTE(review): mergedInfo is computed outside the lock, so a concurrent
// update to other info fields could be overwritten — confirm acceptable.
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
var mergedInfo = cloudTenant.info()
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Validates required fields of merged tenant info before it is persisted:
// contact name and email must be present, the email must contain '@', and a
// non-blank website must parse as a URL.
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    TenantContact contact = mergedInfo.contact();
    if (contact.name().isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contact.email().isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! contact.email().contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    if (mergedInfo.website().isBlank()) return; // website is optional
    try {
        new URL(mergedInfo.website());
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
// Writes a non-empty address as an "address" object on the parent cursor;
// writes nothing at all for an empty address.
private void toSlime(TenantAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.address());
addressCursor.setString("postalCodeOrZip", address.code());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.region());
addressCursor.setString("country", address.country());
}
// Writes a non-empty billing contact as a "billingContact" object (with a
// nested "address" object) on the parent cursor; writes nothing when empty.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.contact().name());
addressCursor.setString("email", billingContact.contact().email());
addressCursor.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), addressCursor);
}
// Writes all contacts as a "contacts" array on the parent cursor. Only email
// contacts have a serialization; any other type fails fast.
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
Cursor contactsCursor = parentCursor.setArray("contacts");
contacts.all().forEach(contact -> {
Cursor contactCursor = contactsCursor.addObject();
Cursor audiencesArray = contactCursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
contactCursor.setString("email", email.email());
// This 'return' only ends the current lambda invocation, not the method.
return;
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
});
}
// Parses a wire-format audience string into its enum constant
// (inverse of toAudience).
private static TenantContacts.Audience fromAudience(String value) {
    if (value.equals("tenant")) return TenantContacts.Audience.TENANT;
    if (value.equals("notifications")) return TenantContacts.Audience.NOTIFICATIONS;
    throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
}
// Serializes a contact audience enum constant to its wire-format string
// (inverse of fromAudience).
private static String toAudience(TenantContacts.Audience audience) {
switch (audience) {
case TENANT: return "tenant";
case NOTIFICATIONS: return "notifications";
// Reached only if a new enum constant is added without a mapping here.
default: throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
}
}
// Updates stored tenant info for a cloud tenant from the request body; 404
// when the tenant does not exist or is not a cloud tenant.
// Delegates the lookup and cloud-type check to withCloudTenant instead of
// duplicating them (consistency with the other cloud-only handlers).
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return withCloudTenant(tenantName, tenant -> updateTenantInfo(tenant, request));
}
// Returns the trimmed string value of the field, or the given default when
// the field is absent from the request body.
private String getString(Inspector field, String defaultValue) { // fixed typo: "defaultVale"
    return field.valid() ? field.asString().trim() : defaultValue;
}
// Merges the request body over the existing tenant info field by field
// (absent fields keep their old values), validates the merged result, and
// persists it under the tenant lock.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
// NOTE(review): the merged contact carries no phone, so any stored contact
// phone is dropped on update — confirm this is intentional.
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
// Validate after the merge so partial updates are checked against the full
// resulting info, not just the supplied fields.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Merges address fields from the request body over the existing address.
// The merged address must be either completely blank (cleared) or completely
// filled in; anything in between is rejected.
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
if (!insp.valid()) return oldAddress;
TenantAddress address = TenantAddress.empty()
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
.withAddress(getString(insp.field("addressLines"), oldAddress.address()));
List<String> fields = List.of(address.address(),
address.code(),
address.country(),
address.city(),
address.region());
// Accept all-blank (address removed) or none-blank (address complete).
if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
return address;
throw new IllegalArgumentException("All address fields must be set");
}
// Merges contact fields from the request body over the existing contact.
// A non-blank email without an '@' is rejected.
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
                        .withName(getString(insp.field("name"), oldContact.name()))
                        .withEmail(email) // reuse the value validated above instead of re-reading the field
                        .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
// Merges billing contact and address from the request body over the
// existing billing info; absent body leaves it unchanged.
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact contact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress address = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(contact).withAddress(address);
}
// Parses the "contacts" array of the request body into a new contact list;
// each entry needs an email containing '@' and a list of known audiences.
// Returns the existing contacts untouched when the field is absent.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .collect(toUnmodifiableList()); // use the statically imported collector consistently (was Collectors.toUnmodifiableList())
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return new TenantContacts.EmailContact(audiences, email);
    }).collect(toUnmodifiableList());
    return new TenantContacts(contacts);
}
// Lists notifications, optionally restricted to one tenant and filtered by
// the request properties application/instance/zone/job/type/level.
// 'includeTenantFieldInResponse' makes each entry name its tenant (used when
// listing across all tenants).
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
// Either a single-tenant stream or all tenants with notifications.
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
// True when the request property is absent, or when it is present and its
// mapped value equals the given value. A present property never matches an
// absent value.
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true; // no filter requested
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
// Serializes one notification onto the given cursor. Optional source
// attributes (application, instance, zone, cluster, job, run) are written
// only when present.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
// Maps a notification type to its wire-format string. Note the deliberate
// fall-through: 'submission' serializes as "applicationPackage".
private static String notificationTypeAsString(Notification.Type type) {
switch (type) {
case submission: // intentional fall-through
case applicationPackage: return "applicationPackage";
case testPackage: return "testPackage";
case deployment: return "deployment";
case feedBlock: return "feedBlock";
case reindex: return "reindex";
// Reached only if a new enum constant is added without a mapping here.
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
// Maps a notification level to its wire-format string.
private static String notificationLevelAsString(Notification.Level level) {
switch (level) {
case info: return "info";
case warning: return "warning";
case error: return "error";
// Reached only if a new enum constant is added without a mapping here.
default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
}
// Lists the applications of a tenant (or just the single named application)
// with URLs to their application and instance resources. Instances may be
// limited to production ones via the request.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
// Fail fast with a proper error when the tenant is unknown.
getTenantOrThrow(tenantName);
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
// Returns the application package deployed by the latest dev run of the
// given job as a zip download.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Previously an unchecked Optional.get(): no run meant a NoSuchElementException (500).
    // Report a meaningful not-found error instead.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " for " + id.toFullString()))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
// Returns the stored package diff for the given dev run as a raw byte response.
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    return diff.map(ByteArrayResponse::new)
               .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
// Returns a submitted application (or tester) package as a zip download.
// The build is taken from the 'build' request parameter; when absent, the
// latest submitted revision is used.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long build;
    String parameter = request.getProperty("build");
    if (parameter != null) {
        try {
            build = Validation.requireAtLeast(Long.parseLong(parameter), "build number", 1L); // reuse the value already read
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
        }
    }
    else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
// Returns the stored package diff for the given submitted build number.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    }
    catch (NumberFormatException e) {
        // Report a client error instead of an unhandled NumberFormatException (500).
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
                     .map(ByteArrayResponse::new)
                     .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
// Returns the JSON representation of the named application.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Application application = getApplication(tenantName, applicationName);
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
// Computes the version this application should compile against, optionally
// restricted by an 'allowMajor' major-version parameter.
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
// Returns the JSON representation of the named instance, including its
// application's deployment status.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(slime.setObject(), instance, deploymentStatus, request);
    return new SlimeJsonResponse(slime);
}
// Registers a PEM-encoded developer public key (from the request body) for
// the requesting user on a cloud tenant, and returns the resulting key set.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// The response is built inside the lock so it reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Validates that a configured secret store is readable from the given
// deployment: asks the config server to fetch 'parameter-name' in
// 'aws-region' through the store, and wraps its answer in the response.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
// The config server's JSON answer is echoed verbatim under "result".
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Non-JSON answer from the config server: return the raw text as a 500.
return ErrorResponse.internalServerError(response);
}
}
// Removes a PEM-encoded developer public key (from the request body) from a
// cloud tenant, and returns the remaining key set.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // (A lookup of the key's owning principal was removed here: its result was never used.)
    Slime root = new Slime();
    // The response is built inside the lock so it reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
// Serializes a key -> principal map as [{key, user}, ...] into the given array.
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds a PEM-encoded deploy key (from the request body) to the application
// and returns the resulting key set.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Removes a PEM-encoded deploy key (from the request body) from the
// application and returns the remaining key set.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Adds a named secret store to a cloud tenant: validates input, creates the
// tenant IAM policy, registers the store with the secret service, then
// persists it on the tenant. Returns the full resulting store list.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects happen before the tenant is stored; NOTE(review):
// if storing fails, the created policy/store may be left behind — confirm
// cleanup is handled elsewhere.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Deletes a named secret store from a cloud tenant: removes it from the
// secret service and its IAM policy, then from the stored tenant. Returns
// the remaining store list.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External cleanup happens before the tenant is updated.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Grants the given AWS role access to this cloud tenant's archived data.
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Inspector data = toSlime(request.getData()).get();
    String role = mandatory("role", data).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
// Revokes AWS role access to this cloud tenant's archived data.
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
// Grants the given GCP member access to this cloud tenant's archived data.
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        // The validated field is the member, not a role; the previous message
        // was copied from the AWS variant.
        return ErrorResponse.badRequest("GCP archive access member can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
// Revokes GCP member access to this cloud tenant's archived data.
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
// Patches application-level settings from the request body. Supported
// fields: 'majorVersion' (0 clears the stored pin) and 'pemDeployKey'
// (adds a deploy key). Returns a message listing the applied changes.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 means "unpin": the stored major version becomes empty.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
// Returns the application, throwing NotExistsException when it does not exist.
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    var application = controller.applications().getApplication(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, as JSON under a top-level "nodes" array. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // Reported as retired if the node either is retired or is marked for retirement
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restart/reboot is pending when the wanted generation is ahead of the current one
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Lists the clusters of the given deployment, including autoscaling limits, current and
 * target resources, suggested resources, utilization and scaling history.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target when one exists and it actually differs from the current resources
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes a node state to its API string; throws IllegalArgumentException on unrecognized states. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            return state.name(); // API strings are identical to the enum constant names
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Serializes an orchestration service state; anything not explicitly listed maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case permanentlyDown: case unorchestrated:
            return state.name(); // API strings are identical to the enum constant names
        default:
            return "unknown";
    }
}
/** Serializes a node cluster type to its API string; throws IllegalArgumentException on unrecognized types. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            return type.name(); // API strings are identical to the enum constant names
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Serializes a disk speed to its API string; throws IllegalArgumentException on unrecognized values. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            return diskSpeed.name(); // API strings are identical to the enum constant names
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Serializes a storage type to its API string; throws IllegalArgumentException on unrecognized values. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            return storageType.name(); // API strings are identical to the enum constant names
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams logs for the given deployment directly from the config server.
 * Query parameters are passed through unchanged to the config server log API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources on the effectively-final stream ensures it is closed after copying
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deploymentId);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, recorded on behalf of the calling user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Instant now = controller.clock().instant();
    Principal user = requireUserPrincipal(request);
    SupportAccess granted = controller.supportAccess().allow(deploymentId, now.plus(7, ChronoUnit.DAYS), user.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(granted, now));
}
/**
 * Revokes support access for the given deployment and re-triggers (or queues) its deployment
 * job so that the revocation takes effect on the running deployment.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the principal already resolved above instead of re-reading it from the request
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Fetches proton (search core) metrics for the given deployment and returns them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
/**
 * Wraps the given proton metrics in a pretty-printed JSON response under a "metrics" array.
 * Returns an empty 500 response if serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Force-triggers (or re-triggers) the given job for the given instance.
 * Request body flags: "skipTests", "reTrigger", "skipRevision", "skipUpgrade".
 * Returns a message naming the triggered job(s), or stating that nothing was triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    // reTrigger repeats a single job; forceTrigger may start several jobs at once
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds ", without revision upgrade", ", without platform upgrade",
    // ", without revision and platform upgrade", or "" when nothing is suppressed
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant resumeTime = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, resumeTime);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given job, cancelling any pause set on it. */
private HttpResponse resume(ApplicationId id, JobType type) {
    DeploymentTrigger trigger = controller.applications().deploymentTrigger();
    trigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application overview: identity, latest version, in-flight and outstanding
 * changes, all (or only production) instances, deploy keys, metrics, activity and issue ids.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // In-flight and outstanding changes are reported from the first instance, if any
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);

        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    // Metrics
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    // Activity
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes an instance summary for the application overview: name, in-flight and
 * outstanding changes, change blockers, rotation id and its deployments.
 * Deployments are fully expanded when the request asks for recursion.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Note: a previously computed job status list here was unused and has been removed
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());

        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

        // Change blockers from deployment.xml: which change kinds are blocked, in which time windows
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);

    // List deployments in deployment-spec order when a spec instance exists, otherwise as-is
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // Include full deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Otherwise just identity and a link to the deployment resource
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the id of the instance's first assigned rotation, if any, as "rotationId". */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes a full instance view: identity, source info, in-flight and outstanding changes,
 * change blockers, rotation status, all deployments (expanded when the request asks for
 * recursion), planned-but-missing deployment zones, deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Note: a previously computed job status list here was unused and has been removed
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);

        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // Change blockers from deployment.xml: which change kinds are blocked, in which time windows
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    addRotationId(object, instance);

    // List deployments in deployment-spec order when a spec instance exists, otherwise as-is
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        // Rotation (global endpoint) status is only relevant for production deployments
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // Include full deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Otherwise just identity and a link to the deployment resource
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Also list zones of planned production jobs and active manual deployments which have no deployment yet
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    // Metrics
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    // Activity
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns detailed information about a single deployment of the given instance, or 404 if absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications()
                                  .getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and/or application revision of the given change, when present. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                 application.revisions().get(revision)));
}
/** Serializes a single endpoint: cluster, TLS, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes detailed information about a single deployment: endpoints, links, versions,
 * deploy and expiry times, rotation status, job status, quota, cost, archive URI,
 * activity and metrics. Legacy endpoints are excluded unless "includeLegacyEndpoints" is set.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints of this deployment
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Endpoints declared in deployment.xml which target this deployment
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }

    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());

    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    // Deployments in zones with a TTL additionally report when they will expire
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Derive a "complete"/"pending"/"running" status from the deployment job's step status
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                            response.setString("status", "pending");
                        else response.setString("status", "running");
                    });
        } else {
            // Manual deployments: status is derived from the last run of the deployment job
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }

    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    // Activity
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    // Metrics
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the given rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Writes the status of each rotation endpoint assigned to the instance, for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor entry = array.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as provided by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets the routing status of the given deployment in or out of service, recording whether an
 * operator or the tenant made the change.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value newStatus = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(newStatus, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status for the given deployment, in the legacy
 * "globalrotationoverride" array format: [upstream name, status object]. The array is
 * empty when the application declares no rotation-backed endpoint.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", ""); // Reason is not tracked; field kept for API compatibility
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given (optionally named) endpoint for the deployment in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(instance.rotationStatus().of(rotation, deployment), response);
    return new SlimeJsonResponse(slime);
}
/** Returns the current change (platform and/or application revision) being deployed for the instance, empty object if none. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", instance.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}
/** Proxies the /status page of the given service on the given host, forwarding the request's query parameters. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          Query.empty().add(request.getJDiscRequest().parameters()));
}
/** Returns the service nodes of the given deployment, as reported by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
/** Proxies the /state/v1 API of the given service on the given host, adding the original URL as "forwarded-url". */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    // Pass the original request URL (without its query) downstream so links can be rewritten
    query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns content of the deployed application package at the given path, via the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/** Updates an existing tenant from the request body, then returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // fail fast with 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/**
 * Creates a new tenant from the request body. In public systems the authenticated user is
 * additionally stored as the tenant's contact. Returns the created tenant.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                .info()
                .withContact(TenantContact.from(user.name(), user.email()));
        // Store the contact info under the tenant lock to avoid racing concurrent updates
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates a new application under the given tenant, using credentials from the request body. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, implicitly creating the application first if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "deploy the current system version"
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known revision"
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Returns the revision of this application with the given build number.
 *
 * @throws IllegalArgumentException if no such revision is known, or its package is no
 *         longer present in the application store
 */
private RevisionId getRevision(Application application, long build) {
    for (ApplicationVersion version : application.revisions().withPackage()) {
        RevisionId id = version.id();
        if (id.number() != build) continue;
        boolean packageAvailable = controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                         application.id().application(),
                                                                                         build);
        if (packageAvailable)
            return id;
        break; // first match found but package is gone: fall through to failure
    }
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as skipped, so it will not be deployed. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // "choice" selects which parts of the change to cancel (e.g. ALL); invalid values throw here
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated lists; blanks are discarded
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and the entries within each cluster, are emitted sorted by key for stable output
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the fields of a reindexing status to the given object; absent values are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Maps a reindexing state to its wire name; returns null for states not exposed in the API. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null; // null is filtered out by the Optional.map(...) call site
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // All filter properties are optional; an empty filter restarts the whole deployment
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}
/**
 * Starts a deployment job of the given type from an uploaded application package.
 * Only manually deployed environments are allowed unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Optional "deployOptions" JSON part may carry "vespaVersion" and "dryRun"
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                             .map(json -> SlimeUtils.jsonToSlime(json).get())
                             .flatMap(options -> optional("dryRun", options))
                             .map(Boolean::valueOf)
                             .orElse(false);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application to the given zone. Only system applications with an
 * application package may be deployed this way, always at the current system version,
 * and never while the system itself is upgrading.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // Callers may not pin a version; system applications always deploy at the system version
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes a tenant. The "forget" property permanently forgets the tenant and is operator-only. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    controller.tenants().delete(TenantName.from(tenantName),
                                Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes an application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes an instance, and the application as well if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates a deployment, aborting any deployment job still running against its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance's production deployments when this instance is not in the spec
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ZoneId testedZone = type.zone();
    // Non-production jobs also test against the zone being deployed to
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump on the given node by writing a "serviceDump" report to the node
 * repository. Fails if a dump is already in progress, unless "force" is set. If "wait" is
 * set, blocks until the dump completes or fails and returns the final report.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        // A report with neither failedAt nor completedAt is still in progress
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // Decode the serialized JSON explicitly as UTF-8 rather than the platform default charset
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)),
                                                         java.nio.charset.StandardCharsets.UTF_8));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the "serviceDump" report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
    return new SlimeJsonResponse(report);
}
/**
 * Polls the node's service dump report until it has either completed or failed, then returns it.
 * Sleeps between polls using the controller's sleeper (so tests can avoid real delays).
 *
 * @throws NotExistsException if the report disappears while waiting
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds
    Slime report;
    while (true) {
        // Previously an unchecked Optional.get(); fail with a clear 404 if the report vanished
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Returns the "serviceDump" report of the given node, empty if the node has no such report.
 * Verifies that the node exists and is owned by the given application.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        // Translate an unknown node into a 404 rather than a 400
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/**
 * Parses a source revision from the given JSON object.
 *
 * @throws IllegalArgumentException unless "repository", "branch" and "commit" are all present
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if there is none. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes a tenant, with type-specific detail per tenant kind, followed by its applications.
 * Application entries are expanded with full deployment status when recursion is requested,
 * and filtered according to the request's production-only/active-only flags.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota lookup may fail (external billing service); log and omit the field rather than fail the request
            try {
                var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
                var usedQuota = applications.stream()
                        .map(Application::quotaUsage)
                        .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, only when recursion requires it
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Writes the optional AWS role and GCP member of the given archive access. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role));
    archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member));
}
/** Serializes the tenant quota (nix budget when unlimited), current usage rate, and optional cluster size cap. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    if (quota.maxClusterSize().isPresent())
        object.setLong("clusterSize", quota.maxClusterSize().get());
}
/** Serializes node/group counts, per-node resources, and the estimated cost for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Emits the measured/ideal/current load for each resource dimension, in the order clients expect. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    String[] fields = { "cpu", "idealCpu", "currentCpu",
                        "memory", "idealMemory", "currentMemory",
                        "disk", "idealDisk", "currentDisk" };
    double[] values = { utilization.cpu(), utilization.idealCpu(), utilization.currentCpu(),
                        utilization.memory(), utilization.idealMemory(), utilization.currentMemory(),
                        utilization.disk(), utilization.idealDisk(), utilization.currentDisk() };
    for (int i = 0; i < fields.length; i++)
        utilizationObject.setDouble(fields[i], values[i]);
}
/** One object per autoscaling event: resources before/after, start time, and completion time when finished. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(at -> eventObject.setLong("completion", at.toEpochMilli()));
    });
}
/**
 * Serializes per-node resources. Statement order defines the JSON field order, so it must not change.
 * diskSpeed and storageType are rendered via the file's valueOf helper (imported statically).
 */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Writes a summary entry for the tenant list: name, per-type metadata, and a link to the full resource. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud:
        case deleted:
            break; // no additional metadata for these types
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant metadata: creation (and deletion) time, last dev deployment, last prod submission,
 * and last login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Latest start of a deployment currently in a dev zone; when none exist, fall back to the start
// of the last recorded dev job run for any instance.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Build time of the most recent revision across all applications, when known.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
// deletedAtMillis is only present for deleted tenants.
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI, keeping scheme, user info, host and port, with the path and query replaced. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException cause) {
        // Components come from an already valid URI, so reassembly cannot fail.
        throw new RuntimeException("Will not happen", cause);
    }
}
/** Returns a copy of the given URI with its path replaced and its query removed. */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 REST path addressing the given deployment. */
private String toPath(DeploymentId id) {
    var application = id.applicationId();
    var zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given value as a long.
 *
 * @param valueOrNull the string to parse, or null
 * @param defaultWhenNull the value returned when valueOrNull is null
 * @throws IllegalArgumentException if the value is non-null and not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Keep the parse failure as the cause instead of silently dropping it.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads the given stream (at most ~1 MB) and parses its content as JSON.
 * Previously an I/O failure surfaced as a bare RuntimeException with no message or cause,
 * hiding the underlying error; the cause is now preserved.
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the authenticated principal of the request, failing with 500 when the security filter did not set one. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new RestApiException.InternalServerError("Expected a user principal"));
}
/** Returns the named field of the given object, failing (400) when it is absent or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named string field, or empty when it is not present as a string. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/'; no leading or trailing separator is added. */
private static String path(Object... parts) {
    return Joiner.on("/").join(parts);
}
/** Serializes the application id together with a link back to its REST resource on this server. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String resource = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value();
    object.setString("url", withPath(resource, request.getUri()).toString());
}
/** Serializes the instance id together with a link back to its REST resource on this server. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String resource = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value() +
                      "/instance/" + id.instance().value();
    object.setString("url", withPath(resource, request.getUri()).toString());
}
/**
 * Serializes the result of a deployment activation: revision id, package size, config server
 * prepare log, and the config change actions (restart and refeed) the deployment requires.
 * Field order matters to existing clients; do not reorder.
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
// The prepare log may be absent; emit an empty array in that case.
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services that must be restarted for the new config to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types that must be re-fed due to incompatible schema changes.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Adds one object per service, echoing the config model's service identification fields. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor serviceObject = array.addObject();
        serviceObject.setString("serviceName", service.serviceName);
        serviceObject.setString("serviceType", service.serviceType);
        serviceObject.setString("configId", service.configId);
        serviceObject.setString("hostName", service.hostName);
    });
}
/** Appends each string to the given array, preserving order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Lists all secret stores configured for the tenant under "secretStores". */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoresArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoresArray.addObject(), store);
}
/** Serializes the tenant's container role and one account entry per secret store. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes a single secret store: its name, the AWS account id, and the IAM role used to access it. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a single string, or returns null when the stream is empty.
 * The scanner (and the underlying stream, which is fully consumed here) is now closed —
 * previously it leaked. NOTE(review): Scanner uses the platform default charset; kept as-is
 * to preserve behavior — confirm whether UTF-8 should be forced.
 */
private String readToString(InputStream stream) {
    // "\\A" makes the scanner return the whole remaining input as one token.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** True when the response should recurse into tenants ("recursive=tenant" or any deeper level). */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** True when the response should recurse into applications ("recursive=application" or any deeper level). */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** True when the response should recurse into deployments; accepts the legacy values "all" and "true" as well. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** True when the "production=true" query parameter restricts the response to production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String value = request.getProperty("production");
    return "true".equals(value);
}
/** True when the "activeInstances=true" query parameter restricts the response to instances with deployments. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String value = request.getProperty("activeInstances");
    return "true".equals(value);
}
/** True when the "includeDeleted=true" query parameter asks for deleted entities as well. */
private static boolean includeDeleted(HttpRequest request) {
    String value = request.getProperty("includeDeleted");
    return "true".equals(value);
}
/** Maps the tenant type to its wire name ("ATHENS" for historical reasons). */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz:  return "ATHENS";
        case cloud:   return "CLOUD";
        case deleted: return "DELETED";
    }
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Builds the full application id from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the "jobtype" path segment against the zones known to this controller. */
private JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName, controller.zoneRegistry());
}
/** Builds a run id from the application id, job type and run number in the request path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path),
                     jobTypeFromPath(path),
                     Long.parseLong(path.get("number")));
}
/**
 * Handles submission of a new application revision: parses the multipart payload,
 * validates options and identity configuration, creates the application if needed,
 * and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong();
// 0 means the field was absent; fall back to project id 1.
projectId = projectId == 0 ? 1 : projectId;
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when all three of repository, branch and commit are given.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// Reject relative or scheme-less source URLs up front.
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
// Verify the declared Athenz identity (if any) against the submitting principal before accepting.
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a special deployment-removal package, marking all production deployments for removal. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, removal, 0);
    return new MessageResponse("All deployments removed");
}
/** Parses a zone id from the given segments, requiring it to exist in this system (prod.controller is a pseudo zone and exempt). */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerPseudoZone = zone.environment() == Environment.prod
                                     && zone.region().value().equals("controller");
    if ( ! isControllerPseudoZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body into its named parts.
 * When an X-Content-Hash header is present, the body is digested (SHA-256) while parsing,
 * and the digest must match the base64-decoded header value — this guards against
 * truncated or tampered uploads.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Finds the rotation to operate on for the given instance.
 * With an endpoint id, the matching rotation is required to exist; without one,
 * the instance must have exactly one rotation.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    List<AssignedRotation> rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : rotations)
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        throw new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance);
    }
    if (rotations.size() > 1)
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    return rotations.get(0).rotationId();
}
/** Maps a rotation state to its wire name; any state other than in/out renders as "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Maps an endpoint scope to its wire name; unknown scopes are an error. */
private static String endpointScopeString(Endpoint.Scope scope) {
    switch (scope) {
        case weighted:    return "weighted";
        case application: return "application";
        case global:      return "global";
        case zone:        return "zone";
        default:          throw new IllegalArgumentException("Unknown endpoint scope " + scope);
    }
}
/** Maps a routing method to its wire name; unknown methods are an error. */
private static String routingMethodString(RoutingMethod method) {
    switch (method) {
        case exclusive:    return "exclusive";
        case sharedLayer4: return "sharedLayer4";
        default:           throw new IllegalArgumentException("Unknown routing method " + method);
    }
}
/** Returns the request context attribute with the given name and type, failing when absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/** Creates the application implicitly if it does not exist yet (first submission creates it in public systems). */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isPresent()) return;
    log.fine("Application does not exist in public, creating: " + id);
    var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
}
} |
Because it does not validate any of the information that is part of this update. We should probably split this into two separate validation methods, because part of the problem we are trying to solve here is that a tenant with bad information in both console forms currently cannot be updated at all. :) | private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
// Merge the incoming profile fields onto the tenant's existing info; absent fields keep their current value.
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Validate the merged result before persisting, so a partial update cannot store invalid info.
validateMergedTenantInfo(mergedInfo);
// Store under the tenant lock to avoid losing concurrent updates.
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
} | return new MessageResponse("Tenant info updated"); | private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
// Merge the incoming profile fields onto the tenant's existing info; absent fields keep their current value.
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Validate the merged result before persisting, so a partial update cannot store invalid info.
validateMergedTenantInfo(mergedInfo);
// Store under the tenant lock to avoid losing concurrent updates.
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared JSON mapper; Jackson's ObjectMapper is thread-safe once configured.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Central controller facade used by all handler operations.
private final Controller controller;
// Resolves access-control credentials from incoming requests.
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for this system.
private final TestConfigSerializer testConfigSerializer;
/** Creates the handler; injected by the container with the controller and access-control request resolver. */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
    // Some operations (e.g. deployments) are slow; allow 20 minutes before timing out the handler.
    return Duration.ofSeconds(20 * 60);
}
/**
 * Entry point for all requests: dispatches on HTTP method and maps known exception
 * types to the corresponding HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
// 403
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
// 401
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
// 404
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
// 400
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Config server errors are translated by their error code; other codes become 400 with the code name.
catch (ConfigServerException e) {
switch (e.code()) {
case NOT_FOUND:
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
}
}
// Anything else is unexpected: log it and return 500.
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests to their handler by path pattern; first match wins, so order matters.
 * Fix: the route for ".../environment/{environment}/region/{region}/instance/{instance}" was
 * listed twice with identical pattern and handler — the second occurrence was unreachable dead
 * code and has been removed (no behavior change).
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    // Tenant-level resources
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return tenantInfoProfile(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return tenantInfoBilling(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return tenantInfoContacts(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    // Application-level resources
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    // Instance-level resources
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    // Deployment-level resources (instance before environment in the path)
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy deployment paths (environment before instance in the path)
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests under /application/v4 to their handler; returns 404 for unknown paths. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return putTenantInfo(path.get("tenant"), request, this::putTenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    // Both path orderings (instance-before-environment and environment-before-instance) are supported for the rotation override.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests under /application/v4 to their handler; returns 404 for unknown paths. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level deploy paths operate on the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    // Submission is per application; the instance segment is accepted but not used.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    // Legacy path ordering (environment before instance) for deploy and restart.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests under /application/v4; both paths patch at the application level (the instance segment is ignored). */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests under /application/v4 to their handler; returns 404 for unknown paths. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // Application-level cancellations operate on the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path ordering (environment before instance).
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with the set of HTTP methods this handler supports. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists all tenants, each with its applications, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        List<Application> tenantApplications = allApplications.stream()
                .filter(application -> application.id().tenant().equals(tenant.name()))
                .collect(toList());
        toSlime(tenantArray.addObject(), tenant, tenantApplications, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: a recursive tenant listing when requested, otherwise just resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) as a JSON array of summaries. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    return new SlimeJsonResponse(slime);
}
/** Serves a single tenant by name, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant, with all its applications, as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantObject = slime.setObject();
    toSlime(tenantObject, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Lists the operator-access state for a cloud tenant: whether access is operator-managed,
 * any pending access request, and the audit log of past decisions.
 * Returns 400 for non-cloud tenants.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
    var managedAccess = accessControlService.getManagedAccess(tenant);
    var slime = new Slime();
    var cursor = slime.setObject();
    cursor.setBool("managedAccess", managedAccess);
    // "pendingRequest" is only present when a request is currently open.
    accessRoleInformation.getPendingRequest()
                         .ifPresent(membershipRequest -> {
                             var requestCursor = cursor.setObject("pendingRequest");
                             requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                             requestCursor.setString("reason", membershipRequest.getReason());
                         });
    var auditLogCursor = cursor.setArray("auditLog");
    accessRoleInformation.getAuditLog()
                         .forEach(auditLogEntry -> {
                             var entryCursor = auditLogCursor.addObject();
                             entryCursor.setString("created", auditLogEntry.getCreationTime());
                             entryCursor.setString("approver", auditLogEntry.getApprover());
                             entryCursor.setString("reason", auditLogEntry.getReason());
                             entryCursor.setString("status", auditLogEntry.getAction());
                         });
    return new SlimeJsonResponse(slime);
}
/** Files an ssh access request for a cloud tenant; only operators may do this. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant. The request body may
 * carry an "expiry" epoch-millis field; when absent the approval expires after one day.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector inspector = toSlime(request.getData()).get();
    Instant expiry;
    if (inspector.field("expiry").valid())
        expiry = Instant.ofEpochMilli(inspector.field("expiry").asLong());
    else
        expiry = Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables operator-managed access for the given cloud tenant. */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables operator-managed access for the given cloud tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Turns operator-managed access on or off for a cloud tenant and echoes the new state.
 * Returns 400 for non-cloud tenants.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        // Fixed typo in the error message: "privel" -> "privileges".
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants");
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Serves the full info object for a cloud tenant, or 404 for missing/non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Serves the profile section of a cloud tenant's info, or 404 for missing/non-cloud tenants. */
private HttpResponse tenantInfoProfile(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfoProfile((CloudTenant) tenant.get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Serves the billing section of a cloud tenant's info, or 404 for missing/non-cloud tenants. */
private HttpResponse tenantInfoBilling(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfoBilling((CloudTenant) tenant.get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Serves the contacts section of a cloud tenant's info, or 404 for missing/non-cloud tenants. */
private HttpResponse tenantInfoContacts(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfoContacts((CloudTenant) tenant.get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Renders the legacy, single-object tenant info view. An empty info serializes to an
 * empty JSON object.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor infoCursor = slime.setObject();
    if (!info.isEmpty()) {
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("contactName", info.contact().name());
        infoCursor.setString("contactEmail", info.contact().email());
        // Nested sections: "address", "billingContact" and "contacts".
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
        toSlime(info.contacts(), infoCursor);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the profile view of a tenant's info: contact person, company details and address.
 * An empty info serializes to an empty JSON object.
 */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var contact = root.setObject("contact");
        contact.setString("name", info.contact().name());
        contact.setString("email", info.contact().email());
        var tenant = root.setObject("tenant");
        // "company" maps to the tenant info's name field.
        tenant.setString("company", info.name());
        tenant.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Applies the given info-updating handler to the named tenant with the parsed request body,
 * or returns 404 when the tenant does not exist. The body is parsed only when the tenant is found.
 */
private SlimeJsonResponse putTenantInfo(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isPresent())
        return handler.apply((CloudTenant) tenant.get(), toSlime(request.getData()).get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Renders the billing view of a tenant's info: the billing contact person and address.
 * An empty info serializes to an empty JSON object.
 */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var billingContact = info.billingContact();
        var contact = root.setObject("contact");
        contact.setString("name", billingContact.contact().name());
        contact.setString("email", billingContact.contact().email());
        contact.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Merges the "contact" and "address" fields of the request body into the tenant's billing
 * info and stores the result under the tenant lock. Absent fields keep their old values.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    // Reuse the already-fetched address instead of re-reading it (was an unused local).
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address);
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders the tenant's contact list as a JSON object with a "contacts" array. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor responseRoot = slime.setObject();
    toSlime(cloudTenant.info().contacts(), responseRoot);
    return new SlimeJsonResponse(slime);
}
/**
 * Replaces the tenant's contact list with the merged result of the request body's
 * "contacts" field and stores it under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantInfo currentInfo = cloudTenant.info();
    TenantContacts mergedContacts = updateTenantInfoContacts(inspector.field("contacts"), currentInfo.contacts());
    TenantInfo updatedInfo = currentInfo.withContacts(mergedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(updatedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates merged tenant info before storage: contact name and email must be set, the
 * email must contain '@', and a non-blank website must parse as a URL.
 *
 * @throws IllegalArgumentException if any check fails
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if (! mergedInfo.contact().email().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (! mergedInfo.website().isBlank()) {
        try {
            // Constructed only for its syntax check; the URL instance is discarded.
            // NOTE(review): URL requires a known protocol but is otherwise lenient — confirm this is strict enough.
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException("'website' needs to be a valid address");
        }
    }
}
/** Writes the given address as an "address" object on the parent cursor; writes nothing when the address is empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes the billing contact as a "billingContact" object on the parent cursor; writes nothing when it is empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.contact().name());
    billingCursor.setString("email", billingContact.contact().email());
    billingCursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), billingCursor);
}
/**
 * Writes all tenant contacts as a "contacts" array on the parent cursor. Each entry gets
 * its audiences plus type-specific fields; only email contacts are serializable today.
 *
 * @throws IllegalArgumentException for contact types without a serialization
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL:
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email());
                return; // exits this lambda only; iteration continues with the next contact
            default:
                throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/** Parses the wire name of a contact audience; throws IllegalArgumentException for unknown values. */
private static TenantContacts.Audience fromAudience(String value) {
    if (value.equals("tenant")) return TenantContacts.Audience.TENANT;
    if (value.equals("notifications")) return TenantContacts.Audience.NOTIFICATIONS;
    throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
}
/** Returns the wire name of a contact audience; throws IllegalArgumentException for unmapped values. */
private static String toAudience(TenantContacts.Audience audience) {
    switch (audience) {
        case TENANT: return "tenant";
        case NOTIFICATIONS: return "notifications";
    }
    throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
}
/** Updates the full info object of a cloud tenant, or 404 for missing/non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the trimmed string value of the given field, or the given default when the field is absent. */
private String getString(Inspector field, String defaultValue) { // fixed parameter typo: defaultVale -> defaultValue
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the info fields present in the request body into the tenant's existing info,
 * validates the merged result, and stores it under the tenant lock. Fields absent from
 * the request keep their previous values.
 *
 * @throws IllegalArgumentException if the merged info fails validation
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    // Each field falls back to its old value when missing from the request body.
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges address fields from the request into the old address. The merged address must be
 * either completely blank (no address) or have every field set.
 *
 * @throws IllegalArgumentException if only some of the address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    TenantAddress address = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(address.address(),
                                  address.code(),
                                  address.country(),
                                  address.city(),
                                  address.region());
    // All-or-none rule: accept a fully blank address or a fully populated one.
    if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
        return address;
    throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges contact fields from the request into the old contact. A non-blank email must
 * contain '@'.
 *
 * @throws IllegalArgumentException if the merged email is set but not an email address
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the value computed above instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges billing contact and address fields from the request into the old billing info. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
}
/**
 * Parses a contact list from the request, replacing the old contacts entirely when the
 * field is present. Each entry must carry an email containing '@' and a list of audiences.
 *
 * @throws IllegalArgumentException for malformed emails or unknown audiences
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .collect(toUnmodifiableList()); // use the statically imported collector consistently with below
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return new TenantContacts.EmailContact(audiences, email);
    }).collect(toUnmodifiableList());
    return new TenantContacts(contacts);
}
/**
 * Lists notifications, either for the given tenant or for every tenant having any,
 * filtered by the optional request properties application, instance, zone, job, type
 * and level. Messages are omitted when excludeMessages=true.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    tenant.map(t -> Stream.of(TenantName.from(t)))
          .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
          .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
          // A filter passes when its request property is unset, or the notification has the matching value.
          .filter(notification ->
                  propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                  propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                  propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                  propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                  propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                  propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
          .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request property is unset, or when it maps to a value equal to the
 * given one. A set property never matches an empty value.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
// Serializes a single notification into the given JSON object cursor.
// Optional source coordinates (application, instance, zone, cluster, job, run) are written only when present.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
// The tenant is implicit when listing for a single tenant, so only included on request.
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
// Serializes a notification type for the JSON response; 'submission' intentionally maps to
// "applicationPackage", matching the original switch fall-through.
private static String notificationTypeAsString(Notification.Type type) {
    if (type == Notification.Type.submission || type == Notification.Type.applicationPackage) return "applicationPackage";
    if (type == Notification.Type.testPackage) return "testPackage";
    if (type == Notification.Type.deployment) return "deployment";
    if (type == Notification.Type.feedBlock) return "feedBlock";
    if (type == Notification.Type.reindex) return "reindex";
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
// Serializes a notification level for the JSON response.
private static String notificationLevelAsString(Notification.Level level) {
    if (level == Notification.Level.info) return "info";
    if (level == Notification.Level.warning) return "warning";
    if (level == Notification.Level.error) return "error";
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
// Lists applications under the given tenant — all of them, or just the named one —
// each with links to itself and its instances. With the production-only flag set
// (see showOnlyProductionInstances), dev/perf instances are omitted.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
getTenantOrThrow(tenantName); // 404 when the tenant itself does not exist
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
// Self-link derived from the incoming request URI.
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
// Returns the application package used by the last run of the given job, as a ZIP download.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Fail with a descriptive 404 instead of a bare NoSuchElementException when the job has never run.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
// Returns the application package diff for the given dev run, or 404 when no diff is stored.
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
// Returns a submitted application package — or, with 'tests=true', its tester package — as a ZIP
// download. The 'build' request parameter selects the submission; it defaults to the latest one.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long build;
    String parameter = request.getProperty("build");
    if (parameter != null) {
        try {
            // Use the already-fetched value instead of re-reading the request property.
            build = Validation.requireAtLeast(Long.parseLong(parameter), "build number", 1L);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
        }
    }
    else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
// Returns the application package diff for the given build number, or 404 when absent.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    }
    catch (NumberFormatException e) {
        // Still a 400 (NumberFormatException is an IllegalArgumentException), but with a
        // message that names the offending parameter instead of "For input string: ...".
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
// Returns the full JSON representation of a single application, or 404 when it does not exist.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Application application = getApplication(tenantName, applicationName);
    toSlime(root, application, request);
    return new SlimeJsonResponse(slime);
}
// Returns the Vespa version the application should compile against, optionally
// constrained to the major version given by 'allowMajorParam' (may be null).
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
// Returns the full JSON representation of a single instance, including its deployment status.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // Resolve the instance before the application, matching the original lookup order
    // (and hence which "not found" error wins when both are missing).
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(slime);
}
// Registers the calling user's PEM-encoded public key as a developer key on a cloud tenant,
// and returns the tenant's resulting key set. Only cloud tenants support developer keys.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutation happens under the tenant lock; the response is built from the updated tenant.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Asks the config server of the given deployment to validate access to one of the tenant's
// secret stores, and relays the result. Returns 404 when no store with that name is configured.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
// NOTE(review): the tenant is resolved from the application id, not the tenantName path
// segment — presumably these agree; the path segment is only used in the error message.
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
// Wrap the config server's JSON answer in { target, result } for the caller.
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Unparseable answer from the config server: pass it through as a 500 body.
return ErrorResponse.internalServerError(response);
}
}
// Removes the given PEM-encoded developer key from a cloud tenant and returns the remaining keys.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
// 'user' is looked up but not used below; the removal is keyed on the public key only.
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Writes each developer key (as PEM) and its owning user to the given JSON array cursor.
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds a PEM-encoded deploy key to the application and returns the resulting key set.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutation happens under the application lock; the response reflects the updated key set.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Removes a PEM-encoded deploy key from the application and returns the remaining key set.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutation happens under the application lock; the response reflects the updated key set.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Configures a new secret store for a cloud tenant: creates the IAM tenant policy, registers
// the store with the tenant secret service, then persists it on the tenant. Returns the
// tenant's full secret store list. The external side effects happen before the tenant is
// stored, so a failure in between can leave them configured without the tenant record.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant to serialize the stored state rather than the in-memory copy.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Deletes a named secret store from a cloud tenant: removes it from the tenant secret service
// and the IAM tenant policy, then from the persisted tenant. Returns the remaining stores.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External cleanup first, tenant record last — mirrors the ordering in addSecretStore.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant to serialize the stored state rather than the in-memory copy.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Sets the archive access role for a cloud tenant from the request body's mandatory 'role' field.
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
// Clears the archive access role of a cloud tenant.
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
// Applies a partial update to the application from the request body: an optional 'majorVersion'
// (0 clears it) and an optional 'pemDeployKey' to add. Returns a summary of what changed.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// A majorVersion of 0 means "unpin": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
// Looks up the application, throwing NotExistsException (404) when it does not exist.
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return application.get();
}
// Looks up the instance, throwing NotExistsException (404) when it does not exist.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
// Lists the nodes allocated to the given instance in the given zone, with state,
// version, resources and orchestration details, as fetched from the node repository.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
nodeObject.setBool("down", node.down());
// A node already retired, or merely marked for retirement, is reported as retired.
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Restart/reboot are pending while the wanted generation is ahead of the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
nodeObject.setString("group", node.group());
nodeObject.setLong("index", node.index());
}
return new SlimeJsonResponse(slime);
}
// Returns autoscaling details for each cluster of the given instance in the given zone:
// configured min/max, current and (when different) target resources, suggestions,
// utilization, scaling events and status.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when it differs from the current resources (ignoring non-numeric parts).
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
// Serializes a node state for the JSON response; unexpected states are a programming error.
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
// Serializes an orchestration (service) state; anything not explicitly listed maps to "unknown".
static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.permanentlyDown) return "permanentlyDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return "unknown";
}
// Serializes a node cluster type; unexpected types are a programming error.
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
// Serializes a disk speed setting; unexpected values are a programming error.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
// Serializes a storage type setting; unexpected values are a programming error.
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
// Streams deployment logs from the config server straight to the client.
// The query parameters are passed through to the config server unmodified.
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
// try-with-resources closes the upstream log stream even if the transfer fails.
try (logStream) {
logStream.transferTo(outputStream);
}
}
};
}
// Returns the current support access state for the given deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess currentState = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(currentState, controller.clock().instant()));
}
// Grants support access to the given deployment for 7 days, recorded against the calling user.
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    Instant expiry = now.plus(7, ChronoUnit.DAYS);
    SupportAccess allowed = controller.supportAccess().allow(deployment, expiry, principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
// Revokes support access for the given deployment and re-triggers the deployment so the
// revocation takes effect, recording who made the change.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the principal resolved above instead of re-reading it from the raw request.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
// Returns proton metrics for the given deployment, fetched from its config server.
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
// Wraps the given proton metrics in a { "metrics": [ ... ] } JSON response.
// Serialization failures are logged and reported as an empty 500 response.
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
// Triggers the given job. Body flags: 'reTrigger' re-runs the last run as-is; otherwise a forced
// trigger honors 'skipTests', 'skipRevision' and 'skipUpgrade'. Returns a human-readable summary.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// Builds e.g. ", without revision and platform upgrade" for the response message.
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
(upgradeRevision ? "" : "revision") +
( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
(upgradePlatform ? "" : "platform") +
( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
// Pauses the given job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pausedUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pausedUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) {
    DeploymentTrigger deploymentTrigger = controller.applications().deploymentTrigger();
    deploymentTrigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
// Serializes an application: identity, links, latest submitted version, pending and outstanding
// changes, its instances (production-only when requested), deploy keys, metrics and activity.
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Change info is taken from the first instance only — presumably all instances share the
// application-level change; NOTE(review): confirm this holds for multi-instance applications.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance to the given slime object, as one entry of an
 * application's "instances" array: ongoing and outstanding changes, change
 * blockers, rotation id, and its deployments.
 *
 * Note: the previously computed {@code jobStatus} local was never used and has
 * been removed; the {@code sortedJobs} call it made appears side-effect free
 * — TODO confirm before backporting.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Change currently rolling out, and the change waiting behind it, if any.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        // Change blockers declared in the deployment spec for this instance.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments, sorted by the spec's declared order when the spec declares this instance.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        // Rotation (endpoint) status is only relevant for production deployments with rotations.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise only a shallow reference with a link to the full resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the id of this instance's first assigned rotation, if any, to the given object. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance, with its changes, deployments, deploy keys,
 * metrics and activity, to the given slime object.
 *
 * Note: the previously computed {@code jobStatus} local was never used and has
 * been removed; the {@code sortedJobs} call it made appears side-effect free
 * — TODO confirm before backporting.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    // Source metadata for the latest known revision, when present.
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Change currently rolling out, and the change waiting behind it, if any.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // Change blockers declared in the deployment spec for this instance.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    addRotationId(object, instance);

    // Deployments, sorted by the spec's declared order when the spec declares this instance.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        if (deployment.zone().environment() == Environment.prod) {
            // Single-rotation BCP status, plus per-endpoint status when not recursing.
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise only a shallow reference with a link to the full resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Also list zones of planned production deployment jobs, and of active manually
    // deployed runs, which have no current deployment in this instance.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // Both the singular legacy field and the full list of deploy keys are emitted.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full serialization of a single deployment, or 404 when absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);

    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and/or application revision of the given change. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                 application.revisions().get(revision)));
}
/** Serializes the given endpoint — cluster, TLS, URL, scope, routing method and legacy flag — to the given object. */
private void toSlime(Endpoint endpoint, Cursor object) {
object.setString("cluster", endpoint.cluster().value());
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes the full view of a single deployment to the given slime object:
 * identity, endpoints, links, versions, expiry, rotation and job status, quota,
 * archive URI, activity and metrics. Field order is part of the response format.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Legacy endpoints are hidden unless explicitly requested.
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints of this deployment.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Declared endpoints which target this deployment, appended to the same array.
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Links to related resources.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
// Deployment start time also determines expiry, for zones with a deployment TTL.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
// Rotation status for production deployments with assigned rotations.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
// Job-driven deployments: status is complete / pending / running, derived from the job step.
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else response.setString("status", "running");
});
} else {
// Manual deployments: status comes from the last run of the deployment job.
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the given rotation state as a "bcpStatus" child of the given object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Lists the endpoint status of each assigned rotation, for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        var targets = status.of(assigned.rotationId());
        Cursor statusObject = endpointStatusArray.addObject();
        statusObject.setString("endpointId", assigned.endpointId().id());
        statusObject.setString("rotationId", assigned.rotationId().asString());
        statusObject.setString("clusterId", assigned.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as resolved by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    var zoneRegistry = controller.zoneRegistry();
    return zoneRegistry.getMonitoringSystemUri(deploymentId);
}
/** Sets the routing status of the given deployment in or out of service, recording who made the change. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);

    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value newStatus = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(newStatus, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the legacy global rotation override status for the given deployment's primary rotation endpoint. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given (or only) endpoint of a deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the platform and/or application change currently rolling out to this instance, with its pin status. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a service's /status page for a node of the given deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    HttpURL.Path path = HttpURL.Path.parse("/status").append(restPath);
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), path, query);
}
/** Returns the service nodes of the given deployment, from the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(id, requireZone(environment, region)));
}
/** Proxies a service's /state/v1 page for a node of the given deployment, passing the original URL along. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns content of the deployed application package, at the given path, from the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/** Updates an existing tenant from the request body, then returns its serialized form. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than creating the tenant on update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant from the request body, then returns its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // In public systems, seed the tenant's contact info from the creating user.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
                controller.tenants().store(lockedTenant.withInfo(info)));
    }
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a new application under the given tenant, then returns its serialized form.
 * The previously assigned, but unused, local holding the created {@code Application}
 * has been removed; the creation call is kept for its effect.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, creating the parent application on demand, then returns the instance's serialized form. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version requested = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "the current system version".
        Version version = requested.equals(Version.emptyVersion) ? controller.systemVersion(versionStatus)
                                                                 : requested;
        if ( ! versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 selects the latest revision
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Returns the revision with the given build number, provided its package is
 * still stored; throws IllegalArgumentException otherwise.
 */
private RevisionId getRevision(Application application, long build) {
    for (ApplicationVersion version : application.revisions().withPackage()) {
        if (version.id().number() != build) continue;
        if (controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                  application.id().application(),
                                                                  build))
            return version.id();
        break; // Found, but its package is gone — report as not found.
    }
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as skipped, so it will not be deployed. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application ->
            controller.applications().store(application.withRevisions(revisions ->
                    revisions.with(revisions.get(revision).skipped()))));
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change ongoing = application.get().require(id.instance()).change();
        if (ongoing.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(ongoing)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = nonBlankCommaSeparated(request.getProperty("clusterId"));
    List<String> documentTypes = nonBlankCommaSeparated(request.getProperty("documentType"));
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);

    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}

/** Splits a comma-separated property value into its non-blank parts; empty list when the value is null. */
private static List<String> nonBlankCommaSeparated(String value) {
    if (value == null) return List.of();
    return Stream.of(value.split(","))
                 .filter(part -> ! part.isBlank())
                 .collect(toUnmodifiableList());
}
/** Gets reindexing status of an application in a zone: per cluster, the pending and ready document types. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setBool("enabled", reindexing.enabled());
// Clusters, and the types within each, are listed sorted by name for stable output.
Cursor clustersArray = root.setArray("clusters");
reindexing.clusters().entrySet().stream().sorted(comparingByKey())
.forEach(cluster -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString("name", cluster.getKey());
// Document types which still await reindexing at the required generation.
Cursor pendingArray = clusterObject.setArray("pending");
cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
.forEach(pending -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString("type", pending.getKey());
pendingObject.setLong("requiredGeneration", pending.getValue());
});
// Document types with a reindexing status to report.
Cursor readyArray = clusterObject.setArray("ready");
cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
.forEach(ready -> {
Cursor readyObject = readyArray.addObject();
readyObject.setString("type", ready.getKey());
setStatus(readyObject, ready.getValue());
});
});
return new SlimeJsonResponse(slime);
}
/** Writes the fields of a reindexing status that are present to the given object; absent fields are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("readyAtMillis", millis));
    status.startedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("startedAtMillis", millis));
    status.endedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("endedAtMillis", millis));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
    status.speed().ifPresent(value -> statusObject.setDouble("speed", value));
}
/** Returns the wire name of the given reindexing state, or null for states with no wire name. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null; // unknown states are silently omitted by callers (ifPresent on the mapped Optional)
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zoneId = requireZone(environment, region);
    controller.applications().enableReindexing(application, zoneId);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zoneId);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zoneId = requireZone(environment, region);
    controller.applications().disableReindexing(application, zoneId);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zoneId);
}
/** Schedules a restart of services in the given deployment, optionally narrowed by hostname, cluster type and cluster id. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname"));
    Optional<String> clusterType = Optional.ofNullable(request.getProperty("clusterType"));
    Optional<String> clusterId = Optional.ofNullable(request.getProperty("clusterId"));
    RestartFilter filter = new RestartFilter().withHostName(hostname.map(HostName::of))
                                              .withClusterType(clusterType.map(ClusterSpec.Type::from))
                                              .withClusterId(clusterId.map(ClusterSpec.Id::from));
    controller.applications().restart(deployment, filter);
    return new MessageResponse("Requested restart of " + deployment);
}
/** Suspends or resumes orchestration of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deployment);
}
/**
 * Deploys an application package directly for the given job; only manually deployed
 * environments accept this unless the caller is an operator.
 *
 * Fixes relative to the previous version: the presence check for the application zip now
 * uses the same {@code EnvironmentResource.APPLICATION_ZIP} constant as the lookup (it used
 * the literal "applicationZip", which could drift from the constant), and the optional
 * "deployOptions" JSON part is parsed once instead of twice.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional "deployOptions" form part once, then read both settings from it.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun);
    RunId runId = controller.jobController().last(id, type).get().id(); // present: deploy just registered a run
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application package to the given zone.
 * Only applications matching a known SystemApplication with an application package are accepted,
 * and the version is always the current system version (explicit versions are rejected).
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
// Only system applications with an application package may be deployed through this API.
Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
}
// A client-specified version is rejected: system applications always follow the system version.
String vespaVersion = deployOptions.field("vespaVersion").asString();
if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
}
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; "forget" (operators only) also removes the tombstone. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application; credentials are taken from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(),
                                                                                      toSlime(request.getData()).get(),
                                                                                      request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/**
 * Deletes the given instance, and then the whole application if that was its last instance.
 * Note: the instance delete and the conditional application delete are two separate steps,
 * in this order, so credentials are only required when the application itself goes away.
 */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
controller.applications().deleteInstance(id.instance(instanceName));
if (controller.applications().requireApplication(id).instances().isEmpty()) {
Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
controller.applications().deleteApplication(id, credentials);
}
return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/**
 * Deactivates the given deployment, and aborts any still-running deployment job for it.
 * NOTE(review): getUserPrincipal() is dereferenced without a null check here, unlike
 * requireUserPrincipal() used elsewhere — presumably authentication guarantees a principal
 * on this path; confirm.
 */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
controller.applications().deactivate(id.applicationId(), id.zoneId());
// Abort a job still running against this zone, recording who triggered the deactivation.
controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
.filter(run -> ! run.hasEnded())
.ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
// Fall back to the default instance's production deployments when the requested instance
// is not declared in the deployment spec.
ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
? id : TenantAndApplicationId.from(id).defaultInstance();
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(prodInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(prodInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
// Non-production jobs also test against the zone the job itself deploys to.
ZoneId testedZone = type.zone();
if ( ! type.isProduction())
deployments.add(new DeploymentId(id, testedZone));
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
controller.routing().readTestRunnerEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump for the given node by writing a "serviceDump" report to the
 * node repository; with wait=true, blocks until the dump completes or fails.
 * Rejects a new request while one is in progress, unless force=true.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
// An existing report with neither failedAt nor completedAt set means a dump is in progress.
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
if (report != null) {
Cursor cursor = report.get();
boolean force = request.getBooleanProperty("force");
if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
}
}
Slime requestPayload;
try {
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
} catch (Exception e) {
throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
}
// Required payload fields: "configId" and a non-empty "artifacts" array; "expiresAt" is optional.
Cursor requestPayloadCursor = requestPayload.get();
String configId = requestPayloadCursor.field("configId").asString();
long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
if (configId.isEmpty()) {
throw new IllegalArgumentException("Missing configId");
}
Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
int artifactEntries = artifactsCursor.entries();
if (artifactEntries == 0) {
throw new IllegalArgumentException("Missing or empty 'artifacts'");
}
// Build the dump request to be stored as the node's "serviceDump" report.
Slime dumpRequest = new Slime();
Cursor dumpRequestCursor = dumpRequest.setObject();
dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
dumpRequestCursor.setString("configId", configId);
Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
for (int i = 0; i < artifactEntries; i++) {
dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
}
if (expiresAt > 0) {
dumpRequestCursor.setLong("expiresAt", expiresAt);
}
// Optional "dumpOptions" object is copied through verbatim.
Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
if (dumpOptionsCursor.children() > 0) {
SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
}
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
nodeRepository.updateReports(zone, hostname, reportsUpdate);
boolean wait = request.getBooleanProperty("wait");
if (!wait) return new MessageResponse("Request created");
return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current service dump report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);
    return new SlimeJsonResponse(report.get());
}
/**
 * Polls the node's service dump report until it has either completed or failed, then returns it.
 * Fix: the report Optional is now unwrapped with orElseThrow() instead of an unchecked get();
 * the thrown exception type (NoSuchElementException) is unchanged.
 * NOTE(review): there is no upper bound on the polling loop — a dump that never terminates
 * blocks this handler indefinitely; consider a deadline.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        // The report was just written by the caller, so it is expected to exist.
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElseThrow();
        Cursor cursor = report.get();
        // Either timestamp being set means the dump has terminated.
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report; // effectively-final copy for the logging lambda
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Returns the "serviceDump" report of the given node, parsed as Slime, or empty if the node
 * has no such report. Fails if the node does not exist or is not owned by the given application.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
String application, String instance, String hostname) {
Node node;
try {
// The node repository signals an unknown host with IllegalArgumentException; map it to 404.
node = nodeRepository.getNode(zone, hostname);
} catch (IllegalArgumentException e) {
throw new NotExistsException(hostname);
}
// Only the owning application may read a node's reports.
ApplicationId app = ApplicationId.from(tenant, application, instance);
ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
if (!app.equals(owner)) {
throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
}
String json = node.reports().get("serviceDump");
if (json == null) return Optional.empty();
return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/** Parses a source revision from the given object; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes a tenant with its type-specific metadata, its applications (subject to the
 * request's "production", "activeInstances" and "recursive" filters), and its metadata.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants: domain, property, and (when present) contact information.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants: creator, developer keys, secret stores, quota and archive access.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota lookup is best-effort: a billing failure is logged but does not fail the response.
try {
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Applications: filtered by the request, and expanded with deployment status when recursing.
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = null;
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
// Deployment status is computed lazily, at most once per application.
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes the tenant's quota budget (nix when absent), its usage rate, and any max cluster size. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes cluster resources, including the cost computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes current and ideal utilization figures for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor object) {
    // cpu
    object.setDouble("cpu", utilization.cpu());
    object.setDouble("idealCpu", utilization.idealCpu());
    object.setDouble("currentCpu", utilization.currentCpu());
    // memory
    object.setDouble("memory", utilization.memory());
    object.setDouble("idealMemory", utilization.idealMemory());
    object.setDouble("currentMemory", utilization.currentMemory());
    // disk
    object.setDouble("disk", utilization.disk());
    object.setDouble("idealDisk", utilization.idealDisk());
    object.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes each scaling event with its from/to resources, start time, and completion time when present. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(at -> eventObject.setLong("completion", at.toEpochMilli()));
    });
}
/** Serializes node resources: numeric capacities plus disk speed and storage type names. */
private void toSlime(NodeResources resources, Cursor out) {
    out.setDouble("vcpu", resources.vcpu());
    out.setDouble("memoryGb", resources.memoryGb());
    out.setDouble("diskGb", resources.diskGb());
    out.setDouble("bandwidthGbps", resources.bandwidthGbps());
    out.setString("diskSpeed", valueOf(resources.diskSpeed()));
    out.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact tenant entry for the tenant list: name, type-specific metadata, and its URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Only Athenz tenants carry extra metadata in this listing.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant metadata: creation (and deletion) time, the most recent dev deployment,
 * the most recent production submission, and last-login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Latest dev deployment: first try deployment start times of current dev deployments;
// fall back to the start of the last run of any dev job when no dev deployment is live.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Latest production submission: build time of each application's last revision.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with its path replaced by newPath and its query by newQuery (null clears it). */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException impossible) {
        // All components come from an already-valid URI, so this cannot occur.
        throw new RuntimeException("Will not happen", impossible);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
// null query means the resulting URI has no query component.
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long, returning the default when the string is null,
 * and failing with IllegalArgumentException when it is not a valid integer.
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;

    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException ignored) {
        // The offending value is reported instead of the parse exception.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/**
 * Reads up to 1 MB of JSON from the given stream and parses it as Slime.
 * Fix: read failures previously threw a bare {@code new RuntimeException()} with no message
 * or cause, making them impossible to diagnose; the IOException is now preserved as the cause.
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        throw new RuntimeException("Failed to read request content", e);
    }
}
/** Returns the authenticated principal of the request, failing with an internal server error when absent. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the field with the given key, failing when it is missing or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector value = object.field(key);
    if ( ! value.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return value;
}
/** Returns the string value of the given field, or empty when the field is missing. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given path elements with "/"; elements are rendered with their toString(). */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes a tenant-and-application id with its API URL, based on the request's host and port. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes an application instance id with its API URL, based on the request's host and port. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Serializes an activation result: revision, package size, prepare log messages, and the
 * restart and refeed actions required by the config change.
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log may be absent; the array is still emitted, but empty.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Restart actions: services that must be restarted for the config change to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Refeed actions: document types that must be re-fed after the config change.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes each service info as an object with its name, type, config id and host. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", info.serviceName);
        entry.setString("serviceType", info.serviceType);
        entry.setString("configId", info.configId);
        entry.setString("hostName", info.hostName);
    });
}
/** Appends each string to the given array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores into a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoreArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoreArray.addObject(), store);
}
/** Serializes the tenant's container role and its secret-store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accountsArray = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accountsArray.addObject(), store);
}
/** Serializes a single secret store: its name, AWS account id, and role. */
private void toSlime(Cursor out, TenantSecretStore secretStore) {
    out.setString("name", secretStore.getName());
    out.setString("awsId", secretStore.getAwsId());
    out.setString("role", secretStore.getRole());
}
/**
 * Reads the whole stream into a string, or returns null when the stream is empty.
 * Fix: the Scanner was never closed; try-with-resources now closes it (and thereby the
 * underlying stream) on all paths. NOTE(review): the Scanner uses the platform default
 * charset, as before — confirm whether request bodies should be decoded as UTF-8.
 */
private String readToString(InputStream stream) {
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // Keep ImmutableSet here: its contains(null) returns false, while Set.of would throw,
    // and "recursive" may be absent from the request.
    String recursive = request.getProperty("recursive");
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/** Returns whether the response should include only production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether the response should include only instances with deployments. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether the response should include deleted entities. */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Returns the wire name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    Tenant.Type type = tenant.type();
    if (type == Tenant.Type.athenz) return "ATHENS";
    if (type == Tenant.Type.cloud) return "CLOUD";
    if (type == Tenant.Type.deleted) return "DELETED";
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Builds an application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the job type named by the "jobtype" path segment against this system's zones. */
private JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName, controller.zoneRegistry());
}
/** Builds a run id from the application, job type and run number in the path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Submits an application package (and optional test package) for continuous deployment.
 * Submission options are read from the JSON form part; source information is optional.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// Project id 0 (absent) is normalized to 1.
long projectId = submitOptions.field("projectId").asLong();
projectId = projectId == 0 ? 1 : projectId;
// A source revision is only recorded when all three of repository, branch and commit are given.
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// A given source URL must be absolute.
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a synthetic deployment-removal package, which removes all production deployments for the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    // Project id 0: this synthetic submission is not tied to any build project.
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 removal,
                                                 0);
    return new MessageResponse("All deployments removed");
}
/**
 * Parses and validates a zone from its environment and region parts.
 * Throws IllegalArgumentException when the zone is not known to this system.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The synthetic prod "controller" zone is accepted even though it is never in the registry.
    boolean isControllerZone = zone.environment() == Environment.prod
                               && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body into its named parts.
 * If an X-Content-Hash header is present, the body is digested while being read and the
 * SHA-256 digest must match the (base64-encoded) header value, or the request is rejected.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Wrap the body stream so the digest is computed as the parser consumes it.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Finds the rotation for the given instance, optionally narrowed by endpoint id.
 * Without an endpoint id the instance must have exactly one rotation; with one,
 * the rotation assigned to that endpoint is returned.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : instance.rotations()) {
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        }
        throw new NotExistsException("endpoint " + endpointId.get() +
                                     " does not exist for " + instance);
    }
    if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Maps a rotation state to its API string; unmapped states render as "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Maps an endpoint scope to its API string; throws on unknown scopes. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.weighted) return "weighted";
    if (scope == Endpoint.Scope.application) return "application";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Maps a routing method to its API string; throws on unknown methods. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request-context attribute with the given name, cast to the given type.
 * Throws IllegalArgumentException when the attribute is absent or of the wrong type.
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    // isInstance is false for null, so a missing attribute also lands in the throw below.
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles()) {
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    }
    return false;
}
/** Creates the application if it does not already exist, using credentials derived from the request. */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isPresent())
        return;
    log.fine("Application does not exist in public, creating: " + id);
    var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared, thread-safe JSON mapper for (de)serialization in this handler.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Central controller facade used for all tenant/application/deployment operations.
private final Controller controller;
// Translates HTTP requests into access-control credential objects.
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for the controller's system.
private final TestConfigSerializer testConfigSerializer;
/**
 * Creates the handler.
 *
 * @param parentCtx             threaded request-handler context from the container
 * @param controller            controller facade for all application operations
 * @param accessControlRequests used to derive access-control credentials from requests
 */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
    // Some requests handled here (deploys, submissions, log streaming) can be slow.
    long timeoutMinutes = 20;
    return Duration.ofMinutes(timeoutMinutes);
}
/**
 * Entry point for all requests: dispatches on HTTP method and translates the
 * exception types thrown by handlers into the corresponding HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    // Errors reported by the config server are mapped onto equivalent HTTP statuses.
    catch (ConfigServerException e) {
        switch (e.code()) {
            case NOT_FOUND:
                return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        }
    }
    // Last-resort catch: log the unexpected error and return 500 rather than leaking a stack trace.
    catch (RuntimeException e) {
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Routes GET requests to their handlers. Matching is first-match-wins, so more
 * specific paths are listed before their prefixes. The trailing group of
 * ".../environment/{environment}/region/{region}/instance/{instance}..." routes is
 * the legacy path ordering, kept for backwards compatibility.
 *
 * Fix: removed a duplicate, unreachable route check for
 * ".../environment/{environment}/region/{region}/instance/{instance}" — the
 * identical condition directly above it always matched first.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests (tenant/info updates, access management, archive access, rotation overrides) to their handlers. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
    // The bare archive-access path is the legacy alias for the aws variant.
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests (creation, deployment, job triggering, submission) to their handlers. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level routes use the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    // Legacy path ordering (environment/region before instance), kept for backwards compatibility.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; the application and instance resources accept the same patch payload. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (   path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests (removal of tenants, keys, applications, instances, deployments and overrides) to their handlers. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    // The bare archive-access path is the legacy alias for the aws variant.
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // Application-level routes use the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path ordering (environment/region before instance), kept for backwards compatibility.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Responds to an OPTIONS (preflight) request by advertising every verb this handler dispatches on. */
private HttpResponse handleOPTIONS() {
    EmptyResponse allowed = new EmptyResponse();
    allowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowed;
}
/** Lists every tenant, with its applications expanded, as the recursive root response. */
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
// Fetch all applications once up front, then partition them per tenant below.
List<Application> applications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
toSlime(tenantArray.addObject(),
tenant,
applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
request);
return new SlimeJsonResponse(slime);
}
/** Root of the API: expands all tenants inline when recursion is requested, else lists the child resources. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request)) {
        return recursiveRoot(request);
    }
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) in compact list form. */
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
/** Returns the named tenant in full, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty()) {
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    }
    return tenant(tenant.get(), request);
}
/** Renders the given tenant together with all applications it owns. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    List<Application> ownedApplications = controller.applications().asList(tenant.name());
    toSlime(response.setObject(), tenant, ownedApplications, request);
    return new SlimeJsonResponse(response);
}
/**
 * Returns the access-request state for a cloud tenant: whether access is managed,
 * any pending membership request, and the audit log of past decisions.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// At most one membership request is pending at a time.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
/** Lets an operator request ssh access to a cloud tenant; forbidden for everyone else. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * The expiry defaults to 24 hours from now when absent from the request body.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var inspector = toSlime(request.getData()).get();
var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
Instant.now().plus(1, ChronoUnit.DAYS);
var approve = inspector.field("approve").asBool();
controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
return new MessageResponse("OK");
}
/** Enables managed access control for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Disables managed access control for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/**
 * Enables or disables managed access control for a cloud tenant and echoes the new state.
 *
 * @return the new managedAccess state, or 400 when the tenant is not a cloud tenant
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Returns the info section of a cloud tenant; 404 for missing or non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud) {
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    }
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Applies the given handler to the named tenant if it exists and is a cloud tenant; 404 otherwise. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> handler.apply((CloudTenant) tenant))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Serializes the full tenant info; an empty info yields an empty JSON object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
/** Serializes the profile section (contact, company, website, address) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email());
var tenant = root.setObject("tenant");
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Applies the given handler to the named tenant and the parsed request body; 404 when absent.
 * NOTE(review): unlike the Function overload above, this does not filter on Tenant.Type.cloud
 * before the cast — presumably callers only route cloud tenants here; confirm.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
return controller.tenants().get(tenantName)
.map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Serializes the billing section (contact and address) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Merges the posted billing contact and address into the tenant's existing billing info
 * and persists the result under the tenant lock. Absent fields keep their stored values.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address); // use the local instead of recomputing it
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Serializes only the contacts section of the tenant's info. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var response = new Slime();
    toSlime(cloudTenant.info().contacts(), response.setObject());
    return new SlimeJsonResponse(response);
}
/** Replaces the tenant's contacts with the merged result of the posted 'contacts' field. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
var mergedInfo = cloudTenant.info()
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
// Persist under the tenant lock to avoid clobbering concurrent updates.
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Validates the merged tenant info before it is stored: the contact must have a
 * non-blank name and a plausible email, and a non-blank website must parse as a URL.
 *
 * @throws IllegalArgumentException when any of the checks fail
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    String contactName = mergedInfo.contact().name();
    String contactEmail = mergedInfo.contact().email();
    if (contactName.isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contactEmail.isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! contactEmail.contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    String website = mergedInfo.website();
    if (website.isBlank()) return; // an absent website is fine
    try {
        new URL(website); // syntactic validation only
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
/** Writes the given address as an 'address' object on the parent cursor; no-op when empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.address());
addressCursor.setString("postalCodeOrZip", address.code());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.region());
addressCursor.setString("country", address.country());
}
/** Writes the billing contact as a 'billingContact' object on the parent cursor; no-op when empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.contact().name());
addressCursor.setString("email", billingContact.contact().email());
addressCursor.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), addressCursor);
}
/**
 * Writes all contacts as a 'contacts' array on the parent cursor.
 * Only email contacts have a defined serialization; other types fail fast.
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
Cursor contactsCursor = parentCursor.setArray("contacts");
contacts.all().forEach(contact -> {
Cursor contactCursor = contactsCursor.addObject();
Cursor audiencesArray = contactCursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
contactCursor.setString("email", email.email());
return; // exits only this lambda; iteration continues with the next contact
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
});
}
/** Maps the wire name of a contact audience to its enum value. */
private static TenantContacts.Audience fromAudience(String value) {
    if (value.equals("tenant")) return TenantContacts.Audience.TENANT;
    if (value.equals("notifications")) return TenantContacts.Audience.NOTIFICATIONS;
    throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
}
/** Maps a contact audience enum value to its wire name. */
private static String toAudience(TenantContacts.Audience audience) {
    switch (audience) {
        case TENANT: return "tenant";
        case NOTIFICATIONS: return "notifications";
    }
    throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
}
/** Updates the full tenant info for a cloud tenant from the request body; 404 for other tenant types. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Returns the trimmed string value of the given field, or the given default
 * when the field is absent from the request.
 */
private String getString(Inspector field, String defaultValue) { // param renamed from misspelt 'defaultVale'
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the posted fields into the tenant's existing info, validates the result,
 * and persists it under the tenant lock. Absent fields keep their stored values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
// Reject invalid contact name/email or a malformed website before storing anything.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Merges the posted address fields into the existing address.
 * The merged address must be either completely blank or completely filled in.
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
if (!insp.valid()) return oldAddress;
TenantAddress address = TenantAddress.empty()
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
.withAddress(getString(insp.field("addressLines"), oldAddress.address()));
List<String> fields = List.of(address.address(),
address.code(),
address.country(),
address.city(),
address.region());
// All-blank (no address) and none-blank (complete address) are both acceptable.
if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
return address;
throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges the posted contact fields into the existing contact, validating that a
 * non-blank email looks like an email address.
 *
 * @throws IllegalArgumentException when a non-blank email lacks an '@'
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the validated value instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges the posted billing contact and nested address into the existing billing info. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
if (!insp.valid()) return oldContact;
return TenantBilling.empty()
.withContact(updateTenantInfoContact(insp, oldContact.contact()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Parses the posted contacts array into TenantContacts, or returns the existing
 * contacts when the field is absent. Each entry must carry a plausible email.
 *
 * @throws IllegalArgumentException when an email lacks an '@' or an audience is unknown
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .collect(toUnmodifiableList()); // consistent with the outer collector below (was Collectors.toUnmodifiableList())
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return new TenantContacts.EmailContact(audiences, email);
    }).collect(toUnmodifiableList());
    return new TenantContacts(contacts);
}
/**
 * Lists notifications, either for a single tenant or for all tenants with notifications,
 * filtered by the optional application/instance/zone/job/type/level request properties.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request carries no value for the given property (no filter),
 * or when the mapped property value equals the given (present) value.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String requested = request.getProperty(property);
    if (requested == null) return true; // no filter given — everything matches
    return value.isPresent() && mapper.apply(requested).equals(value.get());
}
/** Serializes one notification; optional source coordinates are emitted only when present. */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Serializes a notification type to its API name; submission and applicationPackage share one name. */
private static String notificationTypeAsString(Notification.Type type) {
    switch (type) {
        case submission:
        case applicationPackage: return "applicationPackage";
        case testPackage: return "testPackage";
        case deployment: return "deployment";
        case feedBlock: return "feedBlock";
        case reindex: return "reindex";
    }
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/** Serializes a notification level to its API name. */
private static String notificationLevelAsString(Notification.Level level) {
    switch (level) {
        case info: return "info";
        case warning: return "warning";
        case error: return "error";
    }
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
/**
 * Lists the applications of a tenant, or the single named application, with per-instance URLs.
 *
 * @throws NotExistsException when the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
getTenantOrThrow(tenantName);
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict the instance list to those with production deployments.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the application package last deployed to the given dev/perf job's zone
 * as a zip download.
 *
 * @throws NotExistsException when the job has never run for this application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Fail with a descriptive 404 instead of an unchecked Optional.get() NoSuchElementException.
    RevisionId revision = controller.jobController().last(id, type)
            .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " for " + id))
            .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Returns the stored diff for a dev deployment run; 404 when no diff exists for the run. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
.map(ByteArrayResponse::new)
.orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
/**
 * Returns an application package (or its tester package) as a zip download.
 * The build is taken from the 'build' request parameter, defaulting to the latest submission.
 *
 * @throws IllegalArgumentException when 'build' is not a positive number
 * @throws NotExistsException when no package has ever been submitted
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long build;
    String parameter = request.getProperty("build");
    if (parameter != null) {
        try {
            build = Validation.requireAtLeast(Long.parseLong(parameter), "build number", 1L); // reuse the value read above
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
        }
    }
    else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                .map(version -> version.id().number())
                .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    // Tester packages are stored per application; regular packages per (instance, zone) deployment.
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Returns the diff of the given build's application package against the previous build.
 *
 * @throws IllegalArgumentException when the build number is not a number
 * @throws NotExistsException when no diff is stored for the build
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    }
    catch (NumberFormatException e) {
        // Consistent with applicationPackage(): report a malformed number as a client error.
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Returns the named application, serialized in full. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
/**
 * Returns the platform version the application should compile against,
 * optionally restricted by the major version given in 'allowMajorParam'.
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
Slime slime = new Slime();
OptionalInt allowMajor = OptionalInt.empty();
if (allowMajorParam != null) {
try {
allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
}
}
Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
slime.setObject().setString("compileVersion", compileVersion.toFullString());
return new SlimeJsonResponse(slime);
}
/** Returns the named instance together with the application's deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key from the request body as a developer key for the
 * requesting user, and returns the tenant's full key list.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutate and serialize under the tenant lock so the response reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Asks the config server of the given zone to validate a tenant secret store
 * against the given AWS region and parameter name, and wraps the result.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
// The config server answers with raw JSON; re-parse it into our response envelope.
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Unparsable config server response: pass it through as an internal error.
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes the PEM public key from the request body from a cloud tenant's developer
 * keys, and returns the remaining key list.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // The previous lookup of the key's owning Principal was never used and has been removed.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each (public key, owner) pair into the given array cursor. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM public key from the request body as a deploy key, and returns all deploy keys. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate and serialize under the application lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/** Removes the PEM public key from the request body from the deploy keys, and returns the rest. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate and serialize under the application lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Registers a new AWS-backed secret store for a cloud tenant: creates the IAM tenant
 * policy, registers the store with the secret service, and persists it on the tenant.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from a cloud tenant, removing it from the secret
 * service and the IAM tenant policy before persisting the change.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
private HttpResponse removeAwsArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var member = mandatory("member", data).asString();
if (member.isBlank()) {
return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
private HttpResponse removeGcpArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
    /**
     * Patches application-level settings. Supported request fields:
     * "majorVersion" (0 clears the pinned major version) and "pemDeployKey" (adds a deploy key).
     * Returns a summary of the changes applied, or "No applicable changes." if none matched.
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // 0 is interpreted as "unset the major version"
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }
private Application getApplication(String tenantName, String applicationName) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
return controller.applications().getApplication(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
    /** Lists the nodes allocated to the given deployment, with their state, version and resources. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // Reported retired both when retirement is wanted and when already effectuated.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // A pending restart/reboot shows as the wanted generation being ahead of the current one.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }
    /** Returns autoscaling-related information for each cluster of the given deployment. */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Only emit the target when it differs from the current resources (ignoring non-numeric parts).
            if (cluster.target().isPresent()
                && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
            clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
            clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
        }
        return new SlimeJsonResponse(slime);
    }
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
case combined: return "combined";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
}
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String valueOf(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
try (logStream) {
logStream.transferTo(outputStream);
}
}
};
}
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
    /**
     * Triggers the given job for the given instance. Request body flags:
     * "skipTests", "skipRevision" and "skipUpgrade" invert to requireTests / upgradeRevision /
     * upgradePlatform, and "reTrigger" re-runs the last run instead of force-triggering.
     */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        String triggered = reTrigger
                           ? controller.applications().deploymentTrigger()
                                       .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                           : controller.applications().deploymentTrigger()
                                       .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                       .stream().map(job -> job.type().jobName()).collect(joining(", "));
        // Builds a suffix such as ", without revision and platform upgrade" for the response message.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
    /** Resumes a previously paused job, so it may be triggered again. */
    private HttpResponse resume(ApplicationId id, JobType type) {
        controller.applications().deploymentTrigger().resumeJob(id, type);
        return new MessageResponse(type.jobName() + " for " + id + " resumed");
    }
    /**
     * Serializes an application overview: change status, instances (all, or production only
     * when requested), deploy keys, service quality metrics, activity and ownership info.
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/",
                                                 request.getUri()).toString());

        DeploymentStatus status = controller.jobController().deploymentStatus(application);

        application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));

        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        // Deploying and outstanding changes are reported from the first instance only.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), status.application());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
addRotationId(object, instance);
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
private void addRotationId(Cursor object, Instance instance) {
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
    /**
     * Serializes a single instance in full: ids, source info, change status, change blockers,
     * rotations, deployments (plus zones with pending or manual jobs but no deployment yet),
     * deploy keys, metrics, activity and ownership info.
     */
    private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
        Application application = status.application();
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + instance.id().tenant().value() +
                                                 "/application/" + instance.id().application().value() +
                                                 "/instance/" + instance.id().instance().value() + "/job/",
                                                 request.getUri()).toString());

        application.revisions().last().ifPresent(version -> {
            version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
            version.commit().ifPresent(commit -> object.setString("commit", commit));
        });

        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        // Change and blocker info is only emitted for instances declared in the deployment spec.
        if (application.deploymentSpec().instance(instance.name()).isPresent()) {
            // NOTE(review): jobStatus below is computed but never used — candidate for removal.
            List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                                  .steps(application.deploymentSpec().requireInstance(instance.name()))
                                                  .sortedJobs(status.instanceJobs(instance.name()).values());

            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        addRotationId(object, instance);

        // Deployments are ordered by the deployment spec when the instance is declared in it.
        List<Deployment> deployments =
                application.deploymentSpec().instance(instance.name())
                           .map(spec -> new DeploymentSteps(spec, controller.zoneRegistry()))
                           .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                           .orElse(List.copyOf(instance.deployments().values()));

        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();

            if (deployment.zone().environment() == Environment.prod) {
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                            deploymentObject);
                }

                if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }

            if (recurseOverDeployments(request)) // List full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }

        // Also list zones with planned production deployments or active manual runs, but no deployment yet.
        Stream.concat(status.jobSteps().keySet().stream()
                            .filter(job -> job.application().instance().equals(instance.name()))
                            .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                      controller.jobController().active(instance.id()).stream()
                                .map(run -> run.id().job())
                                .filter(job -> job.type().environment().isManuallyDeployed()))
              .map(job -> job.type().zone())
              .filter(zone -> ! instance.deployments().containsKey(zone))
              .forEach(zone -> {
                  Cursor deploymentObject = instancesArray.addObject();
                  deploymentObject.setString("environment", zone.environment().value());
                  deploymentObject.setString("region", zone.region().value());
              });

        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change, Application application) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}
    /** Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }
    /**
     * Serializes full information about a single deployment: ids, endpoints, links, versions,
     * job/rotation status, quota, archive URI, activity and metrics.
     * Legacy endpoints are filtered out unless the request sets "includeLegacyEndpoints".
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        // Zone-scoped endpoints for this deployment.
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        // Declared endpoints (global or application scope) which target this deployment.
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }

        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());
        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Status of the corresponding deployment job: complete, pending or running.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                                response.setString("status", "pending");
                            else response.setString("status", "running");
                        });
            } else {
                // Manually deployed zones report status from the last run of the deployment job.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }

        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
                  .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
/** Writes the BCP status (the rotation state as a string) of the given rotation state to the given cursor. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the endpoint rotation status of each assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointArray = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        var targets = status.of(assigned.rotationId());
        Cursor entry = endpointArray.addObject();
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    var zoneRegistry = controller.zoneRegistry();
    return zoneRegistry.getMonitoringSystemUri(deploymentId);
}
/** Sets the routing status (in/out of service) of the given deployment's global rotation. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    // Track who changed the status: operator and tenant changes are attributed separately.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value newStatus = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(newStatus, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the routing status of the deployment's primary rotation endpoint, if it has one. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    Cursor overrideArray = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deployment.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deployment).routingStatus();
                  overrideArray.addString(endpoint.upstreamName(deployment));
                  Cursor statusObject = overrideArray.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");      // reason is not tracked; always empty
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of the given deployment, for the rotation matching the optional endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the currently deploying change (platform version and/or application revision) for the given instance. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deployment));
    return new SlimeJsonResponse(slime);
}
/** Proxies a /status page request to the given service on the given node, via the config server. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    HttpURL.Path path = HttpURL.Path.parse("/status").append(restPath);
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deployment, serviceName, DomainName.of(host), path, query);
}
/** Returns the service nodes of the given deployment, as reported by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deployment);
}
/** Proxies a /state/v1 request to the given service on the given node, recording the originally requested URL. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Pass the client's original URL (sans query) along, so downstream responses can refer back to it.
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deployment, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns content of the deployed application package at the given path, via the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deployment, restPath, request.getUri());
}
/**
 * Updates an existing tenant from the request body.
 *
 * @throws NotExistsException if the tenant does not exist (checked up front, so the update cannot create it)
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 for unknown tenants before attempting the update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a new tenant from the request body. In public systems, the creating user is
 * additionally stored as the tenant's initial contact.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Record the creating user as the tenant contact.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
                controller.tenants().store(lockedTenant.withInfo(info)));
    }
    // Reuse the already-parsed tenant name instead of re-parsing the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, using credentials from the request body. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The created Application is not needed for the response, so the return value is intentionally discarded.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instance = applicationId.instance(instanceName);
    controller.applications().createInstance(instance);
    Slime slime = new Slime();
    toSlime(instance, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9"; the empty version means the current system version. */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus); // empty means "current system version"
        if ( ! versionStatus.isActive(version)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long requestedBuild = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known package"
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), locked -> {
        RevisionId revision = requestedBuild == -1
                              ? locked.get().revisions().last().get().id()
                              : getRevision(locked.get(), requestedBuild);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change);
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Returns the application revision with the given build number, verifying its package is still stored; throws if not found. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> match = application.revisions().withPackage().stream()
                                            .map(ApplicationVersion::id)
                                            .filter(version -> version.number() == build)
                                            .findFirst();
    return match.filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                         application.id().application(),
                                                                                         build))
                .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given production build as skipped, so it will not be deployed. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application ->
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))));
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        message.append("Changed deployment from '").append(current)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = splitNonBlank(request.getProperty("clusterId"));
    List<String> documentTypes = splitNonBlank(request.getProperty("documentType"));
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}

/** Splits a nullable, comma-separated property value into its non-blank tokens. */
private static List<String> splitNonBlank(String value) {
    return Optional.ofNullable(value).stream()
                   .flatMap(csv -> Stream.of(csv.split(",")))
                   .filter(token -> ! token.isBlank())
                   .collect(toUnmodifiableList());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and types within each cluster, are emitted sorted by name for stable output.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey()).forEach(cluster -> {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());

        // Document types with a pending reindexing generation requirement.
        Cursor pendingArray = clusterObject.setArray("pending");
        cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).forEach(pending -> {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        });

        // Document types with detailed reindexing status.
        Cursor readyArray = clusterObject.setArray("ready");
        cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).forEach(ready -> {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Writes each present field of the given reindexing status to the given cursor. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
    status.speed().ifPresent(value -> statusObject.setDouble("speed", value));
}
/** Returns the wire name of the given reindexing state, or null for states not exposed by this API. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter is optional; with no filters, all nodes of the deployment are restarted.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys an application package directly through the given job. Only manually deployed
 * environments may be targeted, unless the caller is an operator.
 *
 * @throws IllegalArgumentException on disallowed environments, or a missing 'applicationZip' form part
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for both the presence check and the lookup, so they cannot drift apart.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' part once, and read both settings from it.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun);
    RunId runId = controller.jobController().last(id, type).get().id(); // present: deploy above created a run
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application package to the given zone; regular applications are rejected here. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Only system applications with their own application package may be deployed here.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage())
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");

    // System applications always deploy on the system version, so an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty() && ! vespaVersion.equals("null"))
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");

    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading())
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    VespaVersion systemVersion = versionStatus.systemVersion()
            .orElseThrow(() -> new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"));

    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; only operators may additionally 'forget' it. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; deleting the last instance also deletes the application, which requires credentials. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instance = id.instance(instanceName);
    controller.applications().deleteInstance(instance);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Inspector requestObject = toSlime(request.getData()).get();
        controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    }
    return new MessageResponse("Deleted instance " + instance.toFullString());
}
/** Deactivates the given deployment, aborting any still-running deployment job for its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // Abort any unfinished deployment job for this zone, attributing the abort to the caller.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the given instance is not declared in the deployment spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id
                                   : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(prodInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(prodInstanceId, zone))));
    ZoneId testedZone = type.zone();
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from the given node by writing a 'serviceDump' report to the node
 * repository, which the node agent acts on. The request body must contain 'configId' and a
 * non-empty 'artifacts' list, and may contain 'expiresAt' and 'dumpOptions'. With ?wait=true,
 * blocks until the dump completes or fails and returns the final report; otherwise returns
 * immediately. A dump already in progress is rejected unless ?force=true is given.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    // Reject a new request while a previous dump is still running (neither failed nor completed),
    // unless the caller explicitly forces it.
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    // Parse and validate the request body.
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump request document to store as the node's 'serviceDump' report.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    // Copy optional dump options verbatim, if any were given.
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // Overwrites any previous 'serviceDump' report on the node.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current 'serviceDump' report for the given node, or 404 if there is none. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);
    return new SlimeJsonResponse(report.get());
}
/**
 * Polls the node's service dump report until it has either completed or failed, then returns it.
 * NOTE(review): there is no upper bound on the wait — this relies on the dump eventually terminating.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2;
    while (true) {
        // Declared per iteration, so it is effectively final and may be captured by the log lambda.
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0)
            return new SlimeJsonResponse(report);

        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(report))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
}
/**
 * Reads the "serviceDump" report for the given node, verifying the node exists and is owned by the
 * given application. Returns empty if the node carries no such report.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId requestedApp = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! requestedApp.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + requestedApp.toFullString());
    }
    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from a JSON object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes a tenant with its type-specific attributes and its applications.
 * Field write order is preserved as it may be observable in the rendered JSON.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
// Type-specific fields.
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info is optional; when present, each contact is rendered as an array of strings.
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota lookup is best-effort: on failure the "quota" object is omitted and a warning logged.
try {
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Applications, optionally filtered by request parameters; recursion level controls the detail rendered.
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
// Deployment status is expensive, so it is computed lazily and at most once per application.
DeploymentStatus status = null;
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access grants; absent roles/members are simply omitted from the output. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes quota and its current usage; a missing budget is rendered as JSON null (nix). */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
quota.budget().ifPresentOrElse(
budget -> object.setDouble("budget", budget.doubleValue()),
() -> object.setNix("budget")
);
object.setDouble("budgetUsed", usage.rate());
// Only emitted when a max cluster size is configured.
quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}
/** Serializes cluster resources, including a cost estimate computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
object.setLong("nodes", resources.nodes());
object.setLong("groups", resources.groups());
toSlime(resources.nodeResources(), object.setObject("nodeResources"));
double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system());
object.setDouble("cost", cost);
}
/** Writes current, ideal and peak-style utilization values for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("idealCpu", utilization.idealCpu());
utilizationObject.setDouble("currentCpu", utilization.currentCpu());
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("idealMemory", utilization.idealMemory());
utilizationObject.setDouble("currentMemory", utilization.currentMemory());
utilizationObject.setDouble("disk", utilization.disk());
utilizationObject.setDouble("idealDisk", utilization.idealDisk());
utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes autoscaling events; "completion" is only written when the event has completed. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
Cursor scalingEventObject = scalingEventsArray.addObject();
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
}
}
/** Serializes per-node resource dimensions. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact tenant entry for the tenants list, including a url to the full resource. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
// Cloud and deleted tenants carry no extra list metadata.
case cloud: break;
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Writes tenant metadata: creation/deletion timestamps, last dev deployment, last prod submission,
 * and last-login times per user level. All timestamps are epoch millis.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Last dev deployment: prefer recorded deployment start times; fall back to the latest dev job run.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Latest build time among each application's most recent revision.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI, keeping scheme, user info, host and port, with path and query replaced by the given values. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        // The source URI and the supplied path/query are already well-formed.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
// Delegates to withPathAndQuery with no query component.
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 REST path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId app = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", app.tenant(),
                "application", app.application(),
                "instance", app.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the value to parse, possibly null
 * @param defaultWhenNull the value to return when the input is null
 * @throws IllegalArgumentException if the value is non-null and not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Attach the original exception as cause so the parse failure isn't lost from stack traces.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads the given stream, capped at 1 MB, and parses the content as JSON.
 *
 * @throws RuntimeException with the underlying IOException as cause if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException(), discarding the cause entirely.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request, failing with an internal server error if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null) return principal;
    throw new RestApiException.InternalServerError("Expected a user principal");
}
/** Returns the given field of the object, throwing if it is missing or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given path elements with '/'; elements are converted via their toString. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes a tenant-and-application id together with a self-referential API url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
/** Serializes a full application (instance) id together with a self-referential API url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
/** Serializes the result of activating a deployment: prepare log and required config change actions. */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log messages, if any were produced.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
// Config change actions reported by prepare: services requiring restart, and documents requiring refeed.
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Appends one object per service info to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor infoObject = array.addObject();
        infoObject.setString("serviceName", info.serviceName);
        infoObject.setString("serviceType", info.serviceType);
        infoObject.setString("configId", info.configId);
        infoObject.setString("hostName", info.hostName);
    });
}
/** Appends each string to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoreArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoreArray.addObject(), store);
}
/** Serializes the tenant's container role and its secret store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes a single tenant secret store. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
// Reads the whole stream as one token (the \A delimiter never matches), returning null for an empty stream.
// NOTE(review): the Scanner uses the platform default charset here — confirm whether UTF-8 should be forced.
// The scanner is not closed: closing it would also close the caller's stream.
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the response should recurse into tenants' applications ("recursive=tenant" or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into applications' instances ("recursive=application" or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the response should include full deployment data. */
// Guava's ImmutableSet.contains(null) returns false, so a missing "recursive" property is safe here.
// Do not replace with Set.of(...), whose contains(null) throws NullPointerException.
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether only production instances should be rendered ("production=true"). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns whether only instances with active deployments should be rendered ("activeInstances=true"). */
private static boolean showOnlyActiveInstances(HttpRequest request) {
return "true".equals(request.getProperty("activeInstances"));
}
/** Returns whether deleted tenants should be included ("includeDeleted=true"). */
private static boolean includeDeleted(HttpRequest request) {
return "true".equals(request.getProperty("includeDeleted"));
}
/** Maps a tenant's type to its API string representation. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        case deleted: return "DELETED";
        // Message aligned with the other tenant-type switches in this handler.
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
}
/** Builds an ApplicationId from the tenant/application/instance segments of the request path. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the "jobtype" path segment against this system's zone registry. */
private JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Builds a RunId from the application id, job type and run number in the request path. */
private RunId runIdFromPath(Path path) {
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Handles an application submission: parses the multipart payload (submit options, application zip,
 * test zip), validates source metadata, verifies identity configuration, and forwards to the
 * job controller. Creates the application first if it does not exist.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// A projectId of 0 means unset; default to 1.
long projectId = submitOptions.field("projectId").asLong();
projectId = projectId == 0 ? 1 : projectId;
// A full SourceRevision requires all three of repository, branch and commit.
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// Reject relative or schemeless source URLs early.
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a synthetic deployment-removal package, which removes all production deployments of the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
TenantAndApplicationId.from(tenant, application),
new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), 0),
0);
return new MessageResponse("All deployments removed");
}
/**
 * Validates an environment/region pair against the zone registry. The synthetic
 * "prod.controller" zone is always accepted even though it is not in the registry.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body. When an X-Content-Hash header is present, the body is digested
 * while parsing and the digest (per Signatures.sha256Digester) must match the base64 header value.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
// NOTE(review): Arrays.equals is not constant-time; presumably fine for an integrity (not auth) check — confirm.
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Resolves which rotation the request refers to: the one matching endpointId when given, otherwise
 * the single assigned rotation. Fails if the instance has no rotations, or has several and no
 * endpointId was supplied.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        String wanted = endpointId.get();
        return instance.rotations().stream()
                       .filter(rotation -> rotation.endpointId().id().equals(wanted))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + wanted +
                                                                 " does not exist for " + instance));
    }
    if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Maps a rotation state to its API string; any other state maps to "UNKNOWN" rather than failing. */
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
/** Maps an endpoint scope to its API string; throws on unknown scopes. */
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case weighted: return "weighted";
case application: return "application";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Maps a routing method to its API string; throws on unknown methods. */
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
/** Returns the request context attribute with the given name, cast to the given type; throws if absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles()) {
        if (role.definition() == RoleDefinition.hostedOperator) return true;
    }
    return false;
}
/** Creates the application if it does not already exist, deriving credentials from the request principal. */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
if (controller.applications().getApplication(id).isEmpty()) {
log.fine("Application does not exist in public, creating: " + id);
var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
controller.applications().createApplication(id, credentials);
}
}
} |
Return `Optional.empty()` here? Don't want to return ASCII prefix of a binary file | private static Optional<String> sanitize(byte[] data) {
StringBuilder sb = new StringBuilder();
for (byte b : data) {
char c = (char) b;
if (!printableAscii(c) && !tabOrLineBreak(c)) {
break;
}
sb.append(c);
if (sb.length() == maxDataLength) {
break;
}
}
return Optional.of(sb.toString()).filter(s -> !s.isEmpty());
} | break; | private static Optional<String> sanitize(byte[] data) {
StringBuilder sb = new StringBuilder();
for (byte b : data) {
char c = (char) b;
if (!printableAscii(c) && !tabOrLineBreak(c)) {
return Optional.empty();
}
sb.append(c);
if (sb.length() == maxDataLength) {
break;
}
}
return Optional.of(sb.toString()).filter(s -> !s.isEmpty());
} | class Entry implements Comparable<Entry> {
private final static int maxDataLength = 1024;
private final static Comparator<Entry> comparator = Comparator.comparing(Entry::at).reversed();
private final Instant at;
private final String principal;
private final Method method;
private final String resource;
private final Optional<String> data;
public Entry(Instant at, String principal, Method method, String resource, byte[] data) {
this.at = Objects.requireNonNull(at, "at must be non-null");
this.principal = Objects.requireNonNull(principal, "principal must be non-null");
this.method = Objects.requireNonNull(method, "method must be non-null");
this.resource = Objects.requireNonNull(resource, "resource must be non-null");
this.data = sanitize(data);
}
/** Time of the request */
public Instant at() {
return at;
}
/** The principal performing the request */
public String principal() {
return principal;
}
/** Request method */
public Method method() {
return method;
}
/** API resource (URL path) */
public String resource() {
return resource;
}
/** Request data. This may be truncated if request data logged in this entry was too large */
public Optional<String> data() {
return data;
}
@Override
public int compareTo(Entry that) {
return comparator.compare(this, that);
}
/** HTTP methods that should be logged */
public enum Method {
POST,
PATCH,
PUT,
DELETE
}
private static boolean printableAscii(char c) {
return c >= 32 && c <= 126;
}
private static boolean tabOrLineBreak(char c) {
return c == 9 || c == 10 || c == 13;
}
} | class Entry implements Comparable<Entry> {
private final static int maxDataLength = 1024;
private final static Comparator<Entry> comparator = Comparator.comparing(Entry::at).reversed();
private final Instant at;
private final String principal;
private final Method method;
private final String resource;
private final Optional<String> data;
public Entry(Instant at, String principal, Method method, String resource, byte[] data) {
this.at = Objects.requireNonNull(at, "at must be non-null");
this.principal = Objects.requireNonNull(principal, "principal must be non-null");
this.method = Objects.requireNonNull(method, "method must be non-null");
this.resource = Objects.requireNonNull(resource, "resource must be non-null");
this.data = sanitize(data);
}
/** Time of the request */
public Instant at() {
return at;
}
/** The principal performing the request */
public String principal() {
return principal;
}
/** Request method */
public Method method() {
return method;
}
/** API resource (URL path) */
public String resource() {
return resource;
}
/** Request data. This may be truncated if request data logged in this entry was too large */
public Optional<String> data() {
return data;
}
@Override
public int compareTo(Entry that) {
return comparator.compare(this, that);
}
/** HTTP methods that should be logged */
public enum Method {
POST,
PATCH,
PUT,
DELETE
}
private static boolean printableAscii(char c) {
return c >= 32 && c <= 126;
}
private static boolean tabOrLineBreak(char c) {
return c == 9 || c == 10 || c == 13;
}
} |
PTAL | private static Optional<String> sanitize(byte[] data) {
StringBuilder sb = new StringBuilder();
for (byte b : data) {
char c = (char) b;
if (!printableAscii(c) && !tabOrLineBreak(c)) {
break;
}
sb.append(c);
if (sb.length() == maxDataLength) {
break;
}
}
return Optional.of(sb.toString()).filter(s -> !s.isEmpty());
} | break; | private static Optional<String> sanitize(byte[] data) {
StringBuilder sb = new StringBuilder();
for (byte b : data) {
char c = (char) b;
if (!printableAscii(c) && !tabOrLineBreak(c)) {
return Optional.empty();
}
sb.append(c);
if (sb.length() == maxDataLength) {
break;
}
}
return Optional.of(sb.toString()).filter(s -> !s.isEmpty());
} | class Entry implements Comparable<Entry> {
private final static int maxDataLength = 1024;
private final static Comparator<Entry> comparator = Comparator.comparing(Entry::at).reversed();
private final Instant at;
private final String principal;
private final Method method;
private final String resource;
private final Optional<String> data;
public Entry(Instant at, String principal, Method method, String resource, byte[] data) {
this.at = Objects.requireNonNull(at, "at must be non-null");
this.principal = Objects.requireNonNull(principal, "principal must be non-null");
this.method = Objects.requireNonNull(method, "method must be non-null");
this.resource = Objects.requireNonNull(resource, "resource must be non-null");
this.data = sanitize(data);
}
/** Time of the request */
public Instant at() {
return at;
}
/** The principal performing the request */
public String principal() {
return principal;
}
/** Request method */
public Method method() {
return method;
}
/** API resource (URL path) */
public String resource() {
return resource;
}
/** Request data. This may be truncated if request data logged in this entry was too large */
public Optional<String> data() {
return data;
}
@Override
public int compareTo(Entry that) {
return comparator.compare(this, that);
}
/** HTTP methods that should be logged */
public enum Method {
POST,
PATCH,
PUT,
DELETE
}
private static boolean printableAscii(char c) {
return c >= 32 && c <= 126;
}
private static boolean tabOrLineBreak(char c) {
return c == 9 || c == 10 || c == 13;
}
} | class Entry implements Comparable<Entry> {
private final static int maxDataLength = 1024;
private final static Comparator<Entry> comparator = Comparator.comparing(Entry::at).reversed();
private final Instant at;
private final String principal;
private final Method method;
private final String resource;
private final Optional<String> data;
public Entry(Instant at, String principal, Method method, String resource, byte[] data) {
this.at = Objects.requireNonNull(at, "at must be non-null");
this.principal = Objects.requireNonNull(principal, "principal must be non-null");
this.method = Objects.requireNonNull(method, "method must be non-null");
this.resource = Objects.requireNonNull(resource, "resource must be non-null");
this.data = sanitize(data);
}
/** Time of the request */
public Instant at() {
return at;
}
/** The principal performing the request */
public String principal() {
return principal;
}
/** Request method */
public Method method() {
return method;
}
/** API resource (URL path) */
public String resource() {
return resource;
}
/** Request data. This may be truncated if request data logged in this entry was too large */
public Optional<String> data() {
return data;
}
@Override
public int compareTo(Entry that) {
return comparator.compare(this, that);
}
/** HTTP methods that should be logged */
public enum Method {
POST,
PATCH,
PUT,
DELETE
}
private static boolean printableAscii(char c) {
return c >= 32 && c <= 126;
}
private static boolean tabOrLineBreak(char c) {
return c == 9 || c == 10 || c == 13;
}
} |
Should this explicitly check for the string `VESPA_LOAD_CODE_AS_HUGEPAGES=true` instead of just the prefix? | private void verifyCodePlacement(boolean hugePages) {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, false, hugePages);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES="));
} | assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES=")); | private void verifyCodePlacement(boolean hugePages) {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, false, hugePages);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES="));
} | class SearchNodeTest {
/** Asserts that the ProtonConfig produced by {@code node} has the expected base directory. */
private void assertBaseDir(String expected, SearchNode node) {
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    node.getConfig(configBuilder);
    ProtonConfig protonConfig = new ProtonConfig(configBuilder);
    assertEquals(expected, protonConfig.basedir());
}
// Attaches a transaction log server and a mock host to the given search node,
// initializes both services, and freezes the model topology so config can be produced.
// useFsync may be null, meaning "not configured" (see requireThatSyncIsHonoured).
private void prepare(MockRoot root, SearchNode node, Boolean useFsync) {
Host host = new Host(root, "mockhost");
TransactionLogServer tls = new TransactionLogServer(root, "mycluster", useFsync);
tls.setHostResource(new HostResource(host));
tls.setBasePort(100);
// TLS must be initialized before it is handed to the node.
tls.initService(root.getDeployState());
node.setTls(tls);
node.setHostResource(new HostResource(host));
node.setBasePort(200);
node.initService(root.getDeployState());
root.freezeModelTopology();
}
// Factory wrapper around SearchNode.create with the cluster name fixed to "mycluster".
// The two Optional arguments and the trailing 0.0 are left at neutral values —
// TODO(review): confirm their exact semantics against SearchNode.create.
private static SearchNode createSearchNode(MockRoot root, String name, int distributionKey,
NodeSpec nodeSpec, boolean flushOnShutDown, boolean isHosted, boolean loadCodeAsHugePages) {
return SearchNode.create(root, name, distributionKey, nodeSpec, "mycluster", null, flushOnShutDown,
Optional.empty(), Optional.empty(), isHosted, loadCodeAsHugePages, 0.0);
}
// Default test node: distribution key 3, NodeSpec(7, 5), flush-on-shutdown and
// hosted mode enabled, huge-pages code placement disabled.
private static SearchNode createSearchNode(MockRoot root) {
return createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), true, true, false);
}
// Verifies that usefsync in the TLS config follows the configured value.
@Test
public void requireThatSyncIsHonoured() {
// null = not configured; fsync must default to enabled.
assertTrue(getTlsConfig(new TestProperties(), null).usefsync());
assertTrue(getTlsConfig(new TestProperties(), true).usefsync());
assertFalse(getTlsConfig(new TestProperties(), false).usefsync());
}
// Verifies that the node's base directory is derived from the cluster name
// and the node's distribution key.
@Test
public void requireThatBasedirIsCorrectForElasticMode() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
prepare(root, node, true);
// The "n3" path component matches the distribution key (3) used above.
assertBaseDir(Defaults.getDefaults().underVespaHome("var/db/vespa/search/cluster.mycluster/n3"), node);
}
@Test
public void requireThatPreShutdownCommandIsEmptyWhenNotActivated() {
MockRoot root = new MockRoot("");
// flushOnShutDown is false here, so no pre-shutdown command should be configured.
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
// NOTE(review): "mynbode" looks like a typo for "mynode"; harmless, as the hostname is arbitrary here.
node.setHostResource(new HostResource(new Host(node, "mynbode")));
node.initService(root.getDeployState());
assertFalse(node.getPreShutdownCommand().isPresent());
}
@Test
public void requireThatPreShutdownCommandUsesPrepareRestartWhenActivated() {
MockRoot root = new MockRoot("");
// flushOnShutDown is true here, which should install the prepareRestart pre-shutdown command.
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertTrue(node.getPreShutdownCommand().isPresent());
// The command addresses the node's own RPC port.
assertTrue(node.getPreShutdownCommand().get().contains("vespa-proton-cmd " + node.getRpcPort() + " prepareRestart"));
}
// Exercises both settings of the huge-pages code placement flag (see verifyCodePlacement).
@Test
public void requireThatCodePageTypeCanBeControlled() {
verifyCodePlacement(true);
verifyCodePlacement(false);
}
// Creates a MockRoot whose deploy state is built from the given properties.
private MockRoot createRoot(ModelContext.Properties properties) {
return new MockRoot("", new DeployState.Builder().properties(properties).build());
}
// Builds and returns the transaction log server config produced by a default
// search node prepared with the given fsync setting (null = not configured).
private TranslogserverConfig getTlsConfig(ModelContext.Properties properties, Boolean useFsync) {
MockRoot root = createRoot(properties);
SearchNode node = createSearchNode(root);
prepare(root, node, useFsync);
TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
node.getConfig(tlsBuilder);
return tlsBuilder.build();
}
} | class SearchNodeTest {
private void assertBaseDir(String expected, SearchNode node) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
node.getConfig(builder);
ProtonConfig cfg = new ProtonConfig(builder);
assertEquals(expected, cfg.basedir());
}
private void prepare(MockRoot root, SearchNode node, Boolean useFsync) {
Host host = new Host(root, "mockhost");
TransactionLogServer tls = new TransactionLogServer(root, "mycluster", useFsync);
tls.setHostResource(new HostResource(host));
tls.setBasePort(100);
tls.initService(root.getDeployState());
node.setTls(tls);
node.setHostResource(new HostResource(host));
node.setBasePort(200);
node.initService(root.getDeployState());
root.freezeModelTopology();
}
private static SearchNode createSearchNode(MockRoot root, String name, int distributionKey,
NodeSpec nodeSpec, boolean flushOnShutDown, boolean isHosted, boolean loadCodeAsHugePages) {
return SearchNode.create(root, name, distributionKey, nodeSpec, "mycluster", null, flushOnShutDown,
Optional.empty(), Optional.empty(), isHosted, loadCodeAsHugePages, 0.0);
}
private static SearchNode createSearchNode(MockRoot root) {
return createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), true, true, false);
}
@Test
public void requireThatSyncIsHonoured() {
assertTrue(getTlsConfig(new TestProperties(), null).usefsync());
assertTrue(getTlsConfig(new TestProperties(), true).usefsync());
assertFalse(getTlsConfig(new TestProperties(), false).usefsync());
}
@Test
public void requireThatBasedirIsCorrectForElasticMode() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
prepare(root, node, true);
assertBaseDir(Defaults.getDefaults().underVespaHome("var/db/vespa/search/cluster.mycluster/n3"), node);
}
@Test
public void requireThatPreShutdownCommandIsEmptyWhenNotActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode")));
node.initService(root.getDeployState());
assertFalse(node.getPreShutdownCommand().isPresent());
}
@Test
public void requireThatPreShutdownCommandUsesPrepareRestartWhenActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertTrue(node.getPreShutdownCommand().isPresent());
assertTrue(node.getPreShutdownCommand().get().contains("vespa-proton-cmd " + node.getRpcPort() + " prepareRestart"));
}
@Test
public void requireThatCodePageTypeCanBeControlled() {
verifyCodePlacement(true);
verifyCodePlacement(false);
}
private MockRoot createRoot(ModelContext.Properties properties) {
return new MockRoot("", new DeployState.Builder().properties(properties).build());
}
private TranslogserverConfig getTlsConfig(ModelContext.Properties properties, Boolean useFsync) {
MockRoot root = createRoot(properties);
SearchNode node = createSearchNode(root);
prepare(root, node, useFsync);
TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
node.getConfig(tlsBuilder);
return tlsBuilder.build();
}
} |
The script only checks if VESPA_LOAD_CODE_AS_HUGEPAGES exists, it can be false, true or whatever. So I intentionally made this check like this. | private void verifyCodePlacement(boolean hugePages) {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, false, hugePages);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES="));
} | assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES=")); | private void verifyCodePlacement(boolean hugePages) {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, false, hugePages);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertEquals(hugePages, node.getStartupCommand().contains("VESPA_LOAD_CODE_AS_HUGEPAGES="));
} | class SearchNodeTest {
private void assertBaseDir(String expected, SearchNode node) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
node.getConfig(builder);
ProtonConfig cfg = new ProtonConfig(builder);
assertEquals(expected, cfg.basedir());
}
private void prepare(MockRoot root, SearchNode node, Boolean useFsync) {
Host host = new Host(root, "mockhost");
TransactionLogServer tls = new TransactionLogServer(root, "mycluster", useFsync);
tls.setHostResource(new HostResource(host));
tls.setBasePort(100);
tls.initService(root.getDeployState());
node.setTls(tls);
node.setHostResource(new HostResource(host));
node.setBasePort(200);
node.initService(root.getDeployState());
root.freezeModelTopology();
}
private static SearchNode createSearchNode(MockRoot root, String name, int distributionKey,
NodeSpec nodeSpec, boolean flushOnShutDown, boolean isHosted, boolean loadCodeAsHugePages) {
return SearchNode.create(root, name, distributionKey, nodeSpec, "mycluster", null, flushOnShutDown,
Optional.empty(), Optional.empty(), isHosted, loadCodeAsHugePages, 0.0);
}
private static SearchNode createSearchNode(MockRoot root) {
return createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), true, true, false);
}
@Test
public void requireThatSyncIsHonoured() {
assertTrue(getTlsConfig(new TestProperties(), null).usefsync());
assertTrue(getTlsConfig(new TestProperties(), true).usefsync());
assertFalse(getTlsConfig(new TestProperties(), false).usefsync());
}
@Test
public void requireThatBasedirIsCorrectForElasticMode() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
prepare(root, node, true);
assertBaseDir(Defaults.getDefaults().underVespaHome("var/db/vespa/search/cluster.mycluster/n3"), node);
}
@Test
public void requireThatPreShutdownCommandIsEmptyWhenNotActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode")));
node.initService(root.getDeployState());
assertFalse(node.getPreShutdownCommand().isPresent());
}
@Test
public void requireThatPreShutdownCommandUsesPrepareRestartWhenActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertTrue(node.getPreShutdownCommand().isPresent());
assertTrue(node.getPreShutdownCommand().get().contains("vespa-proton-cmd " + node.getRpcPort() + " prepareRestart"));
}
@Test
public void requireThatCodePageTypeCanBeControlled() {
verifyCodePlacement(true);
verifyCodePlacement(false);
}
private MockRoot createRoot(ModelContext.Properties properties) {
return new MockRoot("", new DeployState.Builder().properties(properties).build());
}
private TranslogserverConfig getTlsConfig(ModelContext.Properties properties, Boolean useFsync) {
MockRoot root = createRoot(properties);
SearchNode node = createSearchNode(root);
prepare(root, node, useFsync);
TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
node.getConfig(tlsBuilder);
return tlsBuilder.build();
}
} | class SearchNodeTest {
private void assertBaseDir(String expected, SearchNode node) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
node.getConfig(builder);
ProtonConfig cfg = new ProtonConfig(builder);
assertEquals(expected, cfg.basedir());
}
private void prepare(MockRoot root, SearchNode node, Boolean useFsync) {
Host host = new Host(root, "mockhost");
TransactionLogServer tls = new TransactionLogServer(root, "mycluster", useFsync);
tls.setHostResource(new HostResource(host));
tls.setBasePort(100);
tls.initService(root.getDeployState());
node.setTls(tls);
node.setHostResource(new HostResource(host));
node.setBasePort(200);
node.initService(root.getDeployState());
root.freezeModelTopology();
}
private static SearchNode createSearchNode(MockRoot root, String name, int distributionKey,
NodeSpec nodeSpec, boolean flushOnShutDown, boolean isHosted, boolean loadCodeAsHugePages) {
return SearchNode.create(root, name, distributionKey, nodeSpec, "mycluster", null, flushOnShutDown,
Optional.empty(), Optional.empty(), isHosted, loadCodeAsHugePages, 0.0);
}
private static SearchNode createSearchNode(MockRoot root) {
return createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), true, true, false);
}
@Test
public void requireThatSyncIsHonoured() {
assertTrue(getTlsConfig(new TestProperties(), null).usefsync());
assertTrue(getTlsConfig(new TestProperties(), true).usefsync());
assertFalse(getTlsConfig(new TestProperties(), false).usefsync());
}
@Test
public void requireThatBasedirIsCorrectForElasticMode() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
prepare(root, node, true);
assertBaseDir(Defaults.getDefaults().underVespaHome("var/db/vespa/search/cluster.mycluster/n3"), node);
}
@Test
public void requireThatPreShutdownCommandIsEmptyWhenNotActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode")));
node.initService(root.getDeployState());
assertFalse(node.getPreShutdownCommand().isPresent());
}
@Test
public void requireThatPreShutdownCommandUsesPrepareRestartWhenActivated() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode2", 4, new NodeSpec(7, 5), true, root.getDeployState().isHosted(), false);
node.setHostResource(new HostResource(new Host(node, "mynbode2")));
node.initService(root.getDeployState());
assertTrue(node.getPreShutdownCommand().isPresent());
assertTrue(node.getPreShutdownCommand().get().contains("vespa-proton-cmd " + node.getRpcPort() + " prepareRestart"));
}
@Test
public void requireThatCodePageTypeCanBeControlled() {
verifyCodePlacement(true);
verifyCodePlacement(false);
}
private MockRoot createRoot(ModelContext.Properties properties) {
return new MockRoot("", new DeployState.Builder().properties(properties).build());
}
private TranslogserverConfig getTlsConfig(ModelContext.Properties properties, Boolean useFsync) {
MockRoot root = createRoot(properties);
SearchNode node = createSearchNode(root);
prepare(root, node, useFsync);
TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
node.getConfig(tlsBuilder);
return tlsBuilder.build();
}
} |
This is used so that test is not run on arm64, where we have no support for onnx at the moment | public void testModelsEvaluator() {
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval"); | public void testModelsEvaluator() {
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} |
Thanks. Locally, the assumption failed but the test worked ... | public void testModelsEvaluator() {
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval"); | public void testModelsEvaluator() {
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} |
Confusing comment, so this test always fails? | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} | |
It says "test passes" so I don't know how you summarize it as "always fails". Anyway, it leaves out linux intel, where (I assume) both the assumption and test succeeds. | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} | |
I understand "assumption fails" as that `assumeTrue` fails, and it was confusing how the test then can pass? | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} | |
Yes, that was surprising to me as well. I guess OnnxEvaluator.isRuntimeAvailable() is wrong. | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | public void testModelsEvaluator() {
assumeTrue(OnnxEvaluator.isRuntimeAvailable());
ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval");
assertEquals(3, modelsEvaluator.models().size());
FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul");
Tensor input1 = Tensor.from("tensor<float>(d0[1]):[2]");
Tensor input2 = Tensor.from("tensor<float>(d0[1]):[3]");
Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(6.0, output.sum().asDouble(), 1e-9);
FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression");
lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i");
output = lgbm.evaluate();
assertEquals(2.0547, output.sum().asDouble(), 1e-4);
FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo1.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
FunctionEvaluator foo2 = modelsEvaluator.evaluatorOf("example", "foo2");
input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }");
input2 = Tensor.from("tensor(x[3]):[2,3,4]");
output = foo2.bind("input1", input1).bind("input2", input2).evaluate();
assertEquals(90, output.asDouble(), 1e-9);
} | class ModelsEvaluatorTest {
@Test
} | class ModelsEvaluatorTest {
@Test
} | |
Used in any system tests? | public void testValidOptions() {
ClientParameters params = getParsedOptions(
"--fieldset", "[fieldset]",
"--route", "dummyroute",
"--configid", "dummyconfig",
"--showdocsize",
"--timeout", "0.25",
"--noretry",
"--trace", "1",
"--priority", Integer.toString(DocumentProtocol.Priority.HIGH_3.getValue()),
"id:1", "id:2"
);
assertEquals("[fieldset]", params.fieldSet);
assertEquals("dummyroute", params.route);
assertEquals("dummyconfig", params.configId);
assertTrue(params.showDocSize);
assertEquals(0.25, params.timeout, 0.0001);
assertTrue(params.noRetry);
assertEquals(1, params.traceLevel);
assertEquals(DocumentProtocol.Priority.HIGH_3, params.priority);
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertFalse(documentsIds.hasNext());
} | ); | public void testValidOptions() {
ClientParameters params = getParsedOptions(
"--fieldset", "[fieldset]",
"--route", "dummyroute",
"--configid", "dummyconfig",
"--showdocsize",
"--timeout", "0.25",
"--noretry",
"--trace", "1",
"--priority", Integer.toString(DocumentProtocol.Priority.HIGH_3.getValue()),
"id:1", "id:2"
);
assertEquals("[fieldset]", params.fieldSet);
assertEquals("dummyroute", params.route);
assertEquals("dummyconfig", params.configId);
assertTrue(params.showDocSize);
assertEquals(0.25, params.timeout, 0.0001);
assertTrue(params.noRetry);
assertEquals(1, params.traceLevel);
assertEquals(DocumentProtocol.Priority.HIGH_3, params.priority);
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertFalse(documentsIds.hasNext());
} | class CommandLineOptionsTest {
private final InputStream emptyStream = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
@SuppressWarnings("deprecation")
@Rule
public final ExpectedException exception = ExpectedException.none();
private ClientParameters getParsedOptions(InputStream in, String... args) {
CommandLineOptions options = new CommandLineOptions(in);
return options.parseCommandLineArguments(args);
}
private ClientParameters getParsedOptions(String... args) {
return getParsedOptions(emptyStream, args);
}
@Test
public void testDefaultOptions() {
ClientParameters params = getParsedOptions();
assertFalse(params.help);
assertFalse(params.documentIds.hasNext());
assertFalse(params.printIdsOnly);
assertEquals(AllFields.NAME, params.fieldSet);
assertEquals("default-get", params.route);
assertTrue(params.cluster.isEmpty());
assertEquals("client", params.configId);
assertFalse(params.showDocSize);
assertEquals(0, params.timeout, 0);
assertFalse(params.noRetry);
assertEquals(0, params.traceLevel);
assertEquals(DocumentProtocol.Priority.NORMAL_2, params.priority);
}
@Test
@Test
public void testInvalidCombination3() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Field set option can not be used in combination with print ids option.");
getParsedOptions("--printids", "--fieldset", AllFields.NAME);
}
@Test
public void testInvalidCombination4() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Cluster and route options are mutually exclusive.");
getParsedOptions("--route", "dummyroute", "--cluster", "dummycluster");
}
@Test
public void testInvalidPriority() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid priority: 16");
getParsedOptions("--priority", "16");
}
@Test
public void TestHighestPriority() {
ClientParameters params = getParsedOptions("--priority", "HIGHEST");
assertEquals(DocumentProtocol.Priority.HIGHEST, params.priority);
}
@Test
public void TestHigh1PriorityAsNumber() {
ClientParameters params = getParsedOptions("--priority", "2");
assertEquals(DocumentProtocol.Priority.HIGH_1, params.priority);
}
@Test
public void testInvalidTraceLevel1() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: -1");
getParsedOptions("--trace", "-1");
}
@Test
public void testInvalidTraceLevel2() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: 10");
getParsedOptions("--trace", "10");
}
@Test
public void testPrintids() {
ClientParameters params = getParsedOptions("--printids");
assertEquals(DocIdOnly.NAME, params.fieldSet);
}
@Test
public void testCluster() {
ClientParameters params = getParsedOptions("--cluster", "dummycluster");
assertEquals("dummycluster", params.cluster);
assertTrue(params.route.isEmpty());
}
@Test
public void testHelp() {
ClientParameters params = getParsedOptions("--help");
assertTrue(params.help);
}
@Test
public void testDocumentIdsFromInputStream() throws UnsupportedEncodingException {
InputStream in = new ByteArrayInputStream("id:1 id:2 id:3".getBytes("UTF-8"));
ClientParameters params = getParsedOptions(in, "");
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertEquals("id:3", documentsIds.next());
assertFalse(documentsIds.hasNext());
}
@Test
public void testPrintHelp() {
ByteArrayOutputStream outContent = new ByteArrayOutputStream();
PrintStream oldOut = System.out;
System.setOut(new PrintStream(outContent));
try {
CommandLineOptions options = new CommandLineOptions(emptyStream);
options.printHelp();
String output = outContent.toString();
assertTrue(output.contains("vespa-get <options> [documentid...]"));
assertTrue(output.contains("Fetch a document from a Vespa Content cluster."));
} finally {
System.setOut(oldOut);
outContent.reset();
}
}
} | class CommandLineOptionsTest {
private final InputStream emptyStream = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
@SuppressWarnings("deprecation")
@Rule
public final ExpectedException exception = ExpectedException.none();
private ClientParameters getParsedOptions(InputStream in, String... args) {
CommandLineOptions options = new CommandLineOptions(in);
return options.parseCommandLineArguments(args);
}
private ClientParameters getParsedOptions(String... args) {
return getParsedOptions(emptyStream, args);
}
@Test
public void testDefaultOptions() {
ClientParameters params = getParsedOptions();
assertFalse(params.help);
assertFalse(params.documentIds.hasNext());
assertFalse(params.printIdsOnly);
assertEquals(AllFields.NAME, params.fieldSet);
assertEquals("default-get", params.route);
assertTrue(params.cluster.isEmpty());
assertEquals("client", params.configId);
assertFalse(params.showDocSize);
assertEquals(0, params.timeout, 0);
assertFalse(params.noRetry);
assertEquals(0, params.traceLevel);
assertEquals(DocumentProtocol.Priority.NORMAL_2, params.priority);
}
@Test
@Test
public void testInvalidCombination3() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Field set option can not be used in combination with print ids option.");
getParsedOptions("--printids", "--fieldset", AllFields.NAME);
}
@Test
public void testInvalidCombination4() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Cluster and route options are mutually exclusive.");
getParsedOptions("--route", "dummyroute", "--cluster", "dummycluster");
}
@Test
public void testInvalidPriority() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid priority: 16");
getParsedOptions("--priority", "16");
}
@Test
public void TestHighestPriority() {
ClientParameters params = getParsedOptions("--priority", "HIGHEST");
assertEquals(DocumentProtocol.Priority.HIGHEST, params.priority);
}
@Test
public void TestHigh1PriorityAsNumber() {
ClientParameters params = getParsedOptions("--priority", "2");
assertEquals(DocumentProtocol.Priority.HIGH_1, params.priority);
}
@Test
public void testInvalidTraceLevel1() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: -1");
getParsedOptions("--trace", "-1");
}
@Test
public void testInvalidTraceLevel2() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: 10");
getParsedOptions("--trace", "10");
}
@Test
public void testPrintids() {
ClientParameters params = getParsedOptions("--printids");
assertEquals(DocIdOnly.NAME, params.fieldSet);
}
@Test
public void testCluster() {
ClientParameters params = getParsedOptions("--cluster", "dummycluster");
assertEquals("dummycluster", params.cluster);
assertTrue(params.route.isEmpty());
}
@Test
public void testHelp() {
ClientParameters params = getParsedOptions("--help");
assertTrue(params.help);
}
@Test
public void testDocumentIdsFromInputStream() throws UnsupportedEncodingException {
InputStream in = new ByteArrayInputStream("id:1 id:2 id:3".getBytes("UTF-8"));
ClientParameters params = getParsedOptions(in, "");
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertEquals("id:3", documentsIds.next());
assertFalse(documentsIds.hasNext());
}
@Test
public void testPrintHelp() {
ByteArrayOutputStream outContent = new ByteArrayOutputStream();
PrintStream oldOut = System.out;
System.setOut(new PrintStream(outContent));
try {
CommandLineOptions options = new CommandLineOptions(emptyStream);
options.printHelp();
String output = outContent.toString();
assertTrue(output.contains("vespa-get <options> [documentid...]"));
assertTrue(output.contains("Fetch a document from a Vespa Content cluster."));
} finally {
System.setOut(oldOut);
outContent.reset();
}
}
} |
I see there is a lone load types test under `tests/vds`. Dropkicking it in https://github.com/vespa-engine/system-test/pull/2336. I'll wait to merge this PR until the test is removed to avoid any unnecessary breakage. | public void testValidOptions() {
ClientParameters params = getParsedOptions(
"--fieldset", "[fieldset]",
"--route", "dummyroute",
"--configid", "dummyconfig",
"--showdocsize",
"--timeout", "0.25",
"--noretry",
"--trace", "1",
"--priority", Integer.toString(DocumentProtocol.Priority.HIGH_3.getValue()),
"id:1", "id:2"
);
assertEquals("[fieldset]", params.fieldSet);
assertEquals("dummyroute", params.route);
assertEquals("dummyconfig", params.configId);
assertTrue(params.showDocSize);
assertEquals(0.25, params.timeout, 0.0001);
assertTrue(params.noRetry);
assertEquals(1, params.traceLevel);
assertEquals(DocumentProtocol.Priority.HIGH_3, params.priority);
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertFalse(documentsIds.hasNext());
} | ); | public void testValidOptions() {
ClientParameters params = getParsedOptions(
"--fieldset", "[fieldset]",
"--route", "dummyroute",
"--configid", "dummyconfig",
"--showdocsize",
"--timeout", "0.25",
"--noretry",
"--trace", "1",
"--priority", Integer.toString(DocumentProtocol.Priority.HIGH_3.getValue()),
"id:1", "id:2"
);
assertEquals("[fieldset]", params.fieldSet);
assertEquals("dummyroute", params.route);
assertEquals("dummyconfig", params.configId);
assertTrue(params.showDocSize);
assertEquals(0.25, params.timeout, 0.0001);
assertTrue(params.noRetry);
assertEquals(1, params.traceLevel);
assertEquals(DocumentProtocol.Priority.HIGH_3, params.priority);
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertFalse(documentsIds.hasNext());
} | class CommandLineOptionsTest {
private final InputStream emptyStream = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
@SuppressWarnings("deprecation")
@Rule
public final ExpectedException exception = ExpectedException.none();
private ClientParameters getParsedOptions(InputStream in, String... args) {
CommandLineOptions options = new CommandLineOptions(in);
return options.parseCommandLineArguments(args);
}
private ClientParameters getParsedOptions(String... args) {
return getParsedOptions(emptyStream, args);
}
@Test
public void testDefaultOptions() {
ClientParameters params = getParsedOptions();
assertFalse(params.help);
assertFalse(params.documentIds.hasNext());
assertFalse(params.printIdsOnly);
assertEquals(AllFields.NAME, params.fieldSet);
assertEquals("default-get", params.route);
assertTrue(params.cluster.isEmpty());
assertEquals("client", params.configId);
assertFalse(params.showDocSize);
assertEquals(0, params.timeout, 0);
assertFalse(params.noRetry);
assertEquals(0, params.traceLevel);
assertEquals(DocumentProtocol.Priority.NORMAL_2, params.priority);
}
@Test
@Test
public void testInvalidCombination3() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Field set option can not be used in combination with print ids option.");
getParsedOptions("--printids", "--fieldset", AllFields.NAME);
}
@Test
public void testInvalidCombination4() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Cluster and route options are mutually exclusive.");
getParsedOptions("--route", "dummyroute", "--cluster", "dummycluster");
}
@Test
public void testInvalidPriority() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid priority: 16");
getParsedOptions("--priority", "16");
}
@Test
public void TestHighestPriority() {
ClientParameters params = getParsedOptions("--priority", "HIGHEST");
assertEquals(DocumentProtocol.Priority.HIGHEST, params.priority);
}
@Test
public void TestHigh1PriorityAsNumber() {
ClientParameters params = getParsedOptions("--priority", "2");
assertEquals(DocumentProtocol.Priority.HIGH_1, params.priority);
}
@Test
public void testInvalidTraceLevel1() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: -1");
getParsedOptions("--trace", "-1");
}
@Test
public void testInvalidTraceLevel2() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: 10");
getParsedOptions("--trace", "10");
}
@Test
public void testPrintids() {
ClientParameters params = getParsedOptions("--printids");
assertEquals(DocIdOnly.NAME, params.fieldSet);
}
@Test
public void testCluster() {
ClientParameters params = getParsedOptions("--cluster", "dummycluster");
assertEquals("dummycluster", params.cluster);
assertTrue(params.route.isEmpty());
}
@Test
public void testHelp() {
ClientParameters params = getParsedOptions("--help");
assertTrue(params.help);
}
@Test
public void testDocumentIdsFromInputStream() throws UnsupportedEncodingException {
InputStream in = new ByteArrayInputStream("id:1 id:2 id:3".getBytes("UTF-8"));
ClientParameters params = getParsedOptions(in, "");
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertEquals("id:3", documentsIds.next());
assertFalse(documentsIds.hasNext());
}
@Test
public void testPrintHelp() {
ByteArrayOutputStream outContent = new ByteArrayOutputStream();
PrintStream oldOut = System.out;
System.setOut(new PrintStream(outContent));
try {
CommandLineOptions options = new CommandLineOptions(emptyStream);
options.printHelp();
String output = outContent.toString();
assertTrue(output.contains("vespa-get <options> [documentid...]"));
assertTrue(output.contains("Fetch a document from a Vespa Content cluster."));
} finally {
System.setOut(oldOut);
outContent.reset();
}
}
} | class CommandLineOptionsTest {
private final InputStream emptyStream = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
@SuppressWarnings("deprecation")
@Rule
public final ExpectedException exception = ExpectedException.none();
private ClientParameters getParsedOptions(InputStream in, String... args) {
CommandLineOptions options = new CommandLineOptions(in);
return options.parseCommandLineArguments(args);
}
private ClientParameters getParsedOptions(String... args) {
return getParsedOptions(emptyStream, args);
}
@Test
public void testDefaultOptions() {
ClientParameters params = getParsedOptions();
assertFalse(params.help);
assertFalse(params.documentIds.hasNext());
assertFalse(params.printIdsOnly);
assertEquals(AllFields.NAME, params.fieldSet);
assertEquals("default-get", params.route);
assertTrue(params.cluster.isEmpty());
assertEquals("client", params.configId);
assertFalse(params.showDocSize);
assertEquals(0, params.timeout, 0);
assertFalse(params.noRetry);
assertEquals(0, params.traceLevel);
assertEquals(DocumentProtocol.Priority.NORMAL_2, params.priority);
}
@Test
@Test
public void testInvalidCombination3() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Field set option can not be used in combination with print ids option.");
getParsedOptions("--printids", "--fieldset", AllFields.NAME);
}
@Test
public void testInvalidCombination4() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Cluster and route options are mutually exclusive.");
getParsedOptions("--route", "dummyroute", "--cluster", "dummycluster");
}
@Test
public void testInvalidPriority() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid priority: 16");
getParsedOptions("--priority", "16");
}
@Test
public void TestHighestPriority() {
ClientParameters params = getParsedOptions("--priority", "HIGHEST");
assertEquals(DocumentProtocol.Priority.HIGHEST, params.priority);
}
@Test
public void TestHigh1PriorityAsNumber() {
ClientParameters params = getParsedOptions("--priority", "2");
assertEquals(DocumentProtocol.Priority.HIGH_1, params.priority);
}
@Test
public void testInvalidTraceLevel1() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: -1");
getParsedOptions("--trace", "-1");
}
@Test
public void testInvalidTraceLevel2() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Invalid tracelevel: 10");
getParsedOptions("--trace", "10");
}
@Test
public void testPrintids() {
ClientParameters params = getParsedOptions("--printids");
assertEquals(DocIdOnly.NAME, params.fieldSet);
}
@Test
public void testCluster() {
ClientParameters params = getParsedOptions("--cluster", "dummycluster");
assertEquals("dummycluster", params.cluster);
assertTrue(params.route.isEmpty());
}
@Test
public void testHelp() {
ClientParameters params = getParsedOptions("--help");
assertTrue(params.help);
}
@Test
public void testDocumentIdsFromInputStream() throws UnsupportedEncodingException {
InputStream in = new ByteArrayInputStream("id:1 id:2 id:3".getBytes("UTF-8"));
ClientParameters params = getParsedOptions(in, "");
Iterator<String> documentsIds = params.documentIds;
assertEquals("id:1", documentsIds.next());
assertEquals("id:2", documentsIds.next());
assertEquals("id:3", documentsIds.next());
assertFalse(documentsIds.hasNext());
}
@Test
public void testPrintHelp() {
ByteArrayOutputStream outContent = new ByteArrayOutputStream();
PrintStream oldOut = System.out;
System.setOut(new PrintStream(outContent));
try {
CommandLineOptions options = new CommandLineOptions(emptyStream);
options.printHelp();
String output = outContent.toString();
assertTrue(output.contains("vespa-get <options> [documentid...]"));
assertTrue(output.contains("Fetch a document from a Vespa Content cluster."));
} finally {
System.setOut(oldOut);
outContent.reset();
}
}
} |
Should the version be updated since there was a new release today? | public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.hasStep(copyVespaLogs))
return run;
ZoneId zone = id.type().zone();
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
Instant from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) {
deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.tester().id(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
Instant justNow = controller.clock().instant().minusSeconds(2);
log = Stream.concat(log.stream(), testerLog.stream())
.filter(entry -> entry.at().isBefore(justNow))
.sorted(comparing(LogEntry::at))
.collect(toUnmodifiableList());
}
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
} | if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) { | public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.hasStep(copyVespaLogs))
return run;
ZoneId zone = id.type().zone();
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
Instant from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) {
deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.tester().id(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
Instant justNow = controller.clock().instant().minusSeconds(2);
log = Stream.concat(log.stream(), testerLog.stream())
.filter(entry -> entry.at().isBefore(justNow))
.sorted(comparing(LogEntry::at))
.collect(toUnmodifiableList());
}
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
} | class JobController {
public static final Duration maxHistoryAge = Duration.ofDays(60);
private static final Logger log = Logger.getLogger(JobController.class.getName());
private final int historyLength;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final JobMetrics metric;
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller) {
this.historyLength = controller.system().isCd() ? 256 : 64;
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.metric = new JobMetrics(controller.metric(), controller::system);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : instances())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
Optional<Step> step = Stream.of(endStagingSetup, endTests)
.filter(run.readySteps()::contains)
.findAny();
if (step.isEmpty())
return run;
List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), step.get(), entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
public void updateTestReport(RunId id) {
locked(id, run -> {
Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
if (report.isEmpty()) {
return run;
}
logs.writeTestReport(id, report.get());
return run;
});
}
public Optional<String> getTestReports(RunId id) {
return logs.readTestReports(id);
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all instances of applications which have registered. */
public List<ApplicationId> instances() {
return controller.applications().readable().stream()
.flatMap(application -> application.instances().values().stream())
.map(Instance::id)
.collect(toUnmodifiableList());
}
/** Returns all job types which have been run for the given application. */
private List<JobType> jobs(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.filter(type -> last(id, type).isPresent())
.collect(toUnmodifiableList());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(JobId id) {
return runs(id.application(), id.type());
}
/** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
public List<Instant> jobStarts(JobId id) {
return runs(id).descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.map(Run::start)
.collect(toUnmodifiableList());
}
/** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
.findFirst()
.orElseGet(deployment::at);
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
Optional<Run> last = last(id, type);
curator.readHistoricRuns(id, type).forEach((runId, run) -> {
if (last.isEmpty() || ! runId.equals(last.get().id()))
runs.put(runId, run);
});
last.ifPresent(run -> runs.put(run.id(), run));
return runs.build();
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(JobId job) {
return curator.readLastRun(job.application(), job.type());
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the last completed of the given job. */
public Optional<Run> lastCompleted(JobId id) {
return JobStatus.lastCompleted(runs(id));
}
/** Returns the first failing of the given job. */
public Optional<Run> firstFailing(JobId id) {
return JobStatus.firstFailing(runs(id));
}
/** Returns the last success of the given job. */
public Optional<Run> lastSuccess(JobId id) {
return JobStatus.lastSuccess(runs(id));
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return controller.applications().idList().stream()
.flatMap(id -> active(id).stream())
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given application. */
public List<Run> active(TenantAndApplicationId id) {
return controller.applications().requireApplication(id).instances().keySet().stream()
.flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id.instance(name), type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given instance. */
public List<Run> active(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> !run.hasEnded())
.collect(toUnmodifiableList());
}
/** Returns the job status of the given job, possibly empty. */
public JobStatus jobStatus(JobId id) {
return new JobStatus(id, runs(id));
}
/** Returns the deployment status of the given application. */
public DeploymentStatus deploymentStatus(Application application) {
return deploymentStatus(application, controller.readSystemVersion());
}
private DeploymentStatus deploymentStatus(Application application, Version systemVersion) {
return new DeploymentStatus(application,
this::jobStatus,
controller.zoneRegistry(),
systemVersion,
instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
controller.clock().instant());
}
/** Adds deployment status to each of the given applications. */
public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) {
return DeploymentStatusList.from(applications.asList().stream()
.map(application -> deploymentStatus(application, systemVersion))
.collect(toUnmodifiableList()));
}
/** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
return deploymentStatuses(applications, controller.readSystemVersion());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Invoked when starting the step */
public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) {
locked(id, run -> run.with(timestamp, step));
}
/**
* Changes the status of the given run to inactive, and stores it as a historic run.
* Throws TimeoutException if some step in this job is still being run.
*/
public void finish(RunId id) throws TimeoutException {
Deque<Mutex> locks = new ArrayDeque<>();
try {
Run unlockedRun = run(id).get();
locks.push(curator.lock(id.application(), id.type(), report));
for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
locks.push(curator.lock(id.application(), id.type(), step));
locked(id, run -> {
if (run.status() == reset) {
for (Step step : run.steps().keySet())
log(id, step, INFO, List.of("
return run.reset();
}
if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
long successes = runs.values().stream().filter(Run::hasSucceeded).count();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
if ( successes == 1
&& old.getValue().hasSucceeded()
&& ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
oldEntries.next();
continue;
}
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
metric.jobFinished(run.id().job(), finishedRun.status());
pruneRevisions(unlockedRun);
return finishedRun;
});
}
finally {
for (Mutex lock : locks) {
try {
lock.close();
} catch (Throwable t) {
log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
"have been released in ZooKeeper, and if not this controller " +
"must be restarted to release the lock", t);
}
}
}
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id, String reason) {
locked(id, run -> {
run.stepStatuses().entrySet().stream()
.filter(entry -> entry.getValue() == unfinished)
.forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
return run.aborted();
});
}
/** Accepts and stores a new application package and test jar pair under a generated application version key. */
public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
ApplicationController applications = controller.applications();
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
applications.lockApplicationOrThrow(id, application -> {
Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
.map(ApplicationPackage::new);
long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
version.set(submission.toApplicationVersion(1 + previousBuild));
byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
applications.applicationStore().put(id.tenant(),
id.application(),
version.get().id(),
submission.applicationPackage().zippedContent(),
submission.testPackage(),
diff);
applications.applicationStore().putMeta(id.tenant(),
id.application(),
controller.clock().instant(),
submission.applicationPackage().metaDataZip());
application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
application = application.withRevisions(revisions -> revisions.with(version.get()));
application = withPrunedPackages(application);
TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
if (testSummary.problems().isEmpty())
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
else
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.testPackage,
Notification.Level.warning,
testSummary.problems());
submission.applicationPackage().parentVersion().ifPresent(parent -> {
if (parent.getMajor() < controller.readSystemVersion().getMajor())
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.submission,
Notification.Level.warning,
"Parent version used to compile the application is on a " +
"lower major version than the current Vespa Cloud version");
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
});
applications.storeWithUpdatedConfig(application, submission.applicationPackage());
applications.deploymentTrigger().triggerNewRevision(id);
});
return version.get();
}
private LockedApplication withPrunedPackages(LockedApplication application){
TenantAndApplicationId id = application.get().id();
Optional<RevisionId> oldestDeployed = application.get().oldestDeployedRevision();
if (oldestDeployed.isPresent()) {
controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed.get());
for (ApplicationVersion version : application.get().revisions().withPackage())
if (version.id().compareTo(oldestDeployed.get()) < 0)
application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
}
return application;
}
/** Forget revisions no longer present in any relevant job history. */
private void pruneRevisions(Run run) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
boolean isProduction = run.versions().targetRevision().isProduction();
(isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
: Stream.of(jobStatus(run.id().job())))
.flatMap(jobs -> jobs.runs().values().stream())
.map(r -> r.versions().targetRevision())
.filter(id -> id.isProduction() == isProduction)
.min(naturalOrder())
.ifPresent(oldestRevision -> {
controller.applications().lockApplicationOrThrow(applicationId, application -> {
if (isProduction) {
controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
}
else {
controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
}
});
});
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
if (revision.compileVersion()
.map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
.orElse(false))
throw new IllegalArgumentException("Will not start a job with incompatible platform version (" + versions.targetPlatform() + ") " +
"and compile versions (" + revision.compileVersion().get() + ")");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
metric.jobStarted(newId.job());
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
deploy(id, type, platform, applicationPackage, false);
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
if ( ! controller.zoneRegistry().hasZone(type.zone()))
throw new IllegalArgumentException(type.zone() + " is not present in this system");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
if ( ! application.get().instances().containsKey(id.instance()))
application = controller.applications().withNewInstance(application, id);
controller.applications().store(application);
});
DeploymentId deploymentId = new DeploymentId(id, type.zone());
Optional<Run> lastRun = last(id, type);
lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());
byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
start(id,
type,
new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
false,
dryRun ? JobProfile.developmentDryRun : JobProfile.development,
Optional.empty());
});
locked(id, type, __ -> {
runner.get().accept(last(id, type).get());
});
}
/* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
return lastRun.map(run -> run.versions().targetRevision())
.map(prevVersion -> {
ApplicationPackage previous;
try {
previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
} catch (IllegalArgumentException e) {
return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
}
return ApplicationPackageDiff.diff(previous, applicationPackage);
})
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
}
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
if (major.isPresent())
return controller.applications().lastCompatibleVersion(major.get())
.orElseThrow(() -> new IllegalArgumentException("major " + major.get() + " specified in deployment.xml, " +
"but no version on this major was found"));
VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
.map(VespaVersion::versionNumber)
.collect(toList());
instance.map(Instance::deployments)
.map(deployments -> deployments.get(id.zoneId()))
.map(Deployment::version)
.ifPresent(versions::add);
for (Version target : reversed(versions))
if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
return target;
throw new IllegalArgumentException("no suitable platform version found" +
applicationPackage.compileVersion()
.map(version -> " for package compiled against " + version)
.orElse(""));
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id, "replaced by new deployment");
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Mutex ___ = curator.lock(id, type)) {
try {
deactivateTester(tester, type);
}
catch (Exception e) {
}
curator.deleteRunData(id, type);
}
});
logs.delete(id);
curator.deleteRunData(id);
}
catch (Exception e) {
log.log(WARNING, "failed cleaning up after deleted application", e);
}
});
}
public void deactivateTester(TesterId id, JobType type) {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Mutex __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
curator.writeLastRun(modifications.apply(run));
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Mutex lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
public static final Duration maxHistoryAge = Duration.ofDays(60);
private static final Logger log = Logger.getLogger(JobController.class.getName());
private final int historyLength;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final JobMetrics metric;
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller) {
this.historyLength = controller.system().isCd() ? 256 : 64;
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.metric = new JobMetrics(controller.metric(), controller::system);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : instances())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
Optional<Step> step = Stream.of(endStagingSetup, endTests)
.filter(run.readySteps()::contains)
.findAny();
if (step.isEmpty())
return run;
List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), step.get(), entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
public void updateTestReport(RunId id) {
locked(id, run -> {
Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
if (report.isEmpty()) {
return run;
}
logs.writeTestReport(id, report.get());
return run;
});
}
public Optional<String> getTestReports(RunId id) {
return logs.readTestReports(id);
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all instances of applications which have registered. */
public List<ApplicationId> instances() {
return controller.applications().readable().stream()
.flatMap(application -> application.instances().values().stream())
.map(Instance::id)
.collect(toUnmodifiableList());
}
/** Returns all job types which have been run for the given application. */
private List<JobType> jobs(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.filter(type -> last(id, type).isPresent())
.collect(toUnmodifiableList());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(JobId id) {
return runs(id.application(), id.type());
}
/** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
public List<Instant> jobStarts(JobId id) {
return runs(id).descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.map(Run::start)
.collect(toUnmodifiableList());
}
/** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
.findFirst()
.orElseGet(deployment::at);
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
Optional<Run> last = last(id, type);
curator.readHistoricRuns(id, type).forEach((runId, run) -> {
if (last.isEmpty() || ! runId.equals(last.get().id()))
runs.put(runId, run);
});
last.ifPresent(run -> runs.put(run.id(), run));
return runs.build();
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(JobId job) {
return curator.readLastRun(job.application(), job.type());
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the last completed of the given job. */
public Optional<Run> lastCompleted(JobId id) {
return JobStatus.lastCompleted(runs(id));
}
/** Returns the first failing of the given job. */
public Optional<Run> firstFailing(JobId id) {
return JobStatus.firstFailing(runs(id));
}
/** Returns the last success of the given job. */
public Optional<Run> lastSuccess(JobId id) {
return JobStatus.lastSuccess(runs(id));
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return controller.applications().idList().stream()
.flatMap(id -> active(id).stream())
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given application. */
public List<Run> active(TenantAndApplicationId id) {
return controller.applications().requireApplication(id).instances().keySet().stream()
.flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id.instance(name), type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given instance. */
public List<Run> active(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> !run.hasEnded())
.collect(toUnmodifiableList());
}
/** Returns the job status of the given job, possibly empty. */
public JobStatus jobStatus(JobId id) {
return new JobStatus(id, runs(id));
}
/** Returns the deployment status of the given application. */
public DeploymentStatus deploymentStatus(Application application) {
return deploymentStatus(application, controller.readSystemVersion());
}
private DeploymentStatus deploymentStatus(Application application, Version systemVersion) {
return new DeploymentStatus(application,
this::jobStatus,
controller.zoneRegistry(),
systemVersion,
instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
controller.clock().instant());
}
/** Adds deployment status to each of the given applications. */
public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) {
return DeploymentStatusList.from(applications.asList().stream()
.map(application -> deploymentStatus(application, systemVersion))
.collect(toUnmodifiableList()));
}
/** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
return deploymentStatuses(applications, controller.readSystemVersion());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Invoked when starting the step */
public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) {
locked(id, run -> run.with(timestamp, step));
}
/**
* Changes the status of the given run to inactive, and stores it as a historic run.
* Throws TimeoutException if some step in this job is still being run.
*/
public void finish(RunId id) throws TimeoutException {
Deque<Mutex> locks = new ArrayDeque<>();
try {
Run unlockedRun = run(id).get();
locks.push(curator.lock(id.application(), id.type(), report));
for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
locks.push(curator.lock(id.application(), id.type(), step));
locked(id, run -> {
if (run.status() == reset) {
for (Step step : run.steps().keySet())
log(id, step, INFO, List.of("
return run.reset();
}
if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
long successes = runs.values().stream().filter(Run::hasSucceeded).count();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
if ( successes == 1
&& old.getValue().hasSucceeded()
&& ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
oldEntries.next();
continue;
}
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
metric.jobFinished(run.id().job(), finishedRun.status());
pruneRevisions(unlockedRun);
return finishedRun;
});
}
finally {
for (Mutex lock : locks) {
try {
lock.close();
} catch (Throwable t) {
log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
"have been released in ZooKeeper, and if not this controller " +
"must be restarted to release the lock", t);
}
}
}
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id, String reason) {
locked(id, run -> {
run.stepStatuses().entrySet().stream()
.filter(entry -> entry.getValue() == unfinished)
.forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
return run.aborted();
});
}
/** Accepts and stores a new application package and test jar pair under a generated application version key. */
public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
ApplicationController applications = controller.applications();
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
applications.lockApplicationOrThrow(id, application -> {
Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
.map(ApplicationPackage::new);
long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
version.set(submission.toApplicationVersion(1 + previousBuild));
byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
applications.applicationStore().put(id.tenant(),
id.application(),
version.get().id(),
submission.applicationPackage().zippedContent(),
submission.testPackage(),
diff);
applications.applicationStore().putMeta(id.tenant(),
id.application(),
controller.clock().instant(),
submission.applicationPackage().metaDataZip());
application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
application = application.withRevisions(revisions -> revisions.with(version.get()));
application = withPrunedPackages(application);
TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
if (testSummary.problems().isEmpty())
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
else
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.testPackage,
Notification.Level.warning,
testSummary.problems());
submission.applicationPackage().parentVersion().ifPresent(parent -> {
if (parent.getMajor() < controller.readSystemVersion().getMajor())
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.submission,
Notification.Level.warning,
"Parent version used to compile the application is on a " +
"lower major version than the current Vespa Cloud version");
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
});
applications.storeWithUpdatedConfig(application, submission.applicationPackage());
applications.deploymentTrigger().triggerNewRevision(id);
});
return version.get();
}
private LockedApplication withPrunedPackages(LockedApplication application){
TenantAndApplicationId id = application.get().id();
Optional<RevisionId> oldestDeployed = application.get().oldestDeployedRevision();
if (oldestDeployed.isPresent()) {
controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed.get());
for (ApplicationVersion version : application.get().revisions().withPackage())
if (version.id().compareTo(oldestDeployed.get()) < 0)
application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
}
return application;
}
/** Forget revisions no longer present in any relevant job history. */
private void pruneRevisions(Run run) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
boolean isProduction = run.versions().targetRevision().isProduction();
(isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
: Stream.of(jobStatus(run.id().job())))
.flatMap(jobs -> jobs.runs().values().stream())
.map(r -> r.versions().targetRevision())
.filter(id -> id.isProduction() == isProduction)
.min(naturalOrder())
.ifPresent(oldestRevision -> {
controller.applications().lockApplicationOrThrow(applicationId, application -> {
if (isProduction) {
controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
}
else {
controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
}
});
});
}
/**
 * Orders a run of the given type with the default job profile for that type.
 * NOTE(review): the overload actually throws IllegalArgumentException (not IllegalStateException,
 * as previously documented) when the job is already running — see the check in the full overload.
 */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/**
 * Orders a run of the given type, or throws an IllegalArgumentException if that job type is already
 * running, or if the target platform and the package's compile version are incompatible.
 * (The previous doc said IllegalStateException; the code throws IllegalArgumentException.)
 */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
// Refuse combinations the version-compatibility policy rules out, before taking any locks.
if (revision.compileVersion()
.map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
.orElse(false))
throw new IllegalArgumentException("Will not start a job with incompatible platform version (" + versions.targetPlatform() + ") " +
"and compile versions (" + revision.compileVersion().get() + ")");
// The run-history lock guards the last-run record: read, validate not active, then write the new run.
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");
// Run numbers are monotonically increasing, starting at 1.
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
metric.jobStarted(newId.job());
});
}
/**
 * Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.
 * Delegates to the full overload with dryRun = false.
 */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
deploy(id, type, platform, applicationPackage, false);
}
/**
 * Stores the given package and starts a dev deployment of it, after aborting any such ongoing deployment.
 * Creates the instance if it does not yet exist. When dryRun is set, the dry-run job profile is used.
 * NOTE(review): the abort-and-wait, the store-and-start, and the final runner kick happen under three
 * separate lock scopes — there is a small window where another caller could interleave; confirm this
 * is acceptable for dev deployments.
 */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
if ( ! controller.zoneRegistry().hasZone(type.zone()))
throw new IllegalArgumentException(type.zone() + " is not present in this system");
// Ensure the instance exists before anything is stored against it.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
if ( ! application.get().instances().containsKey(id.instance()))
application = controller.applications().withNewInstance(application, id);
controller.applications().store(application);
});
DeploymentId deploymentId = new DeploymentId(id, type.zone());
Optional<Run> lastRun = last(id, type);
// A still-active previous run of this job is aborted and drained before the new one starts.
lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
// Dev build numbers continue from the previous run's target revision.
long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());
byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
start(id,
type,
new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
false,
dryRun ? JobProfile.developmentDryRun : JobProfile.development,
Optional.empty());
});
// Kick the runner synchronously so the new run starts executing immediately.
locked(id, type, __ -> {
runner.get().accept(last(id, type).get());
});
}
/** Application package diff against the previous version, or against the empty package when no previous version exists or it cannot be read. */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
    Optional<RevisionId> previousRevision = lastRun.map(run -> run.versions().targetRevision());
    if (previousRevision.isEmpty())
        return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);

    ApplicationPackage previous;
    try {
        previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, previousRevision.get()));
    }
    catch (IllegalArgumentException e) {
        // An unreadable previous package is treated the same as having no previous package.
        return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
    }
    return ApplicationPackageDiff.diff(previous, applicationPackage);
}
/**
 * Picks the platform version to deploy with: the newest deployable (or already-deployed) version
 * compatible with the package's compile version, or the last version on the pinned major, if any.
 * Throws IllegalArgumentException when no acceptable version exists.
 */
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
// A pinned major in deployment.xml short-circuits compatibility checks.
Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
if (major.isPresent())
return controller.applications().lastCompatibleVersion(major.get())
.orElseThrow(() -> new IllegalArgumentException("major " + major.get() + " specified in deployment.xml, " +
"but no version on this major was found"));
VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
.map(VespaVersion::versionNumber)
.collect(toList());
// The version already deployed in this zone is also a candidate, even if no longer deployable.
instance.map(Instance::deployments)
.map(deployments -> deployments.get(id.zoneId()))
.map(Deployment::version)
.ifPresent(versions::add);
// Prefer the newest candidate; packages without a compile version accept anything.
// NOTE(review): assumes reversed(versions) yields newest-first — confirm against the helper.
for (Version target : reversed(versions))
if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
return target;
throw new IllegalArgumentException("no suitable platform version found" +
applicationPackage.compileVersion()
.map(version -> " for package compiled against " + version)
.orElse(""));
}
/**
 * Aborts the given run, kicks the runner once so the abort is acted upon, then polls until it has ended.
 * NOTE(review): the poll loop has no timeout; a run that never ends would block this thread indefinitely.
 */
private void abortAndWait(RunId id) {
    abort(id, "replaced by new deployment");
    runner.get().accept(last(id.application(), id.type()).get());
    for (;;) {
        if (last(id.application(), id.type()).get().hasEnded())
            return;
        try {
            Thread.sleep(100);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}
/**
 * Deletes run data and tester deployments for applications which are unknown, or no longer built internally.
 * Best-effort per application: failure for one application is logged and does not stop cleanup of the rest.
 */
public void collectGarbage() {
    Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
    curator.applicationsWithJobs().stream()
           .filter(id -> ! applicationsToBuild.contains(id))
           .forEach(id -> {
               try {
                   TesterId tester = TesterId.of(id);
                   for (JobType type : jobs(id))
                       locked(id, type, deactivateTester, __ -> {
                           try (Mutex ___ = curator.lock(id, type)) {
                               try {
                                   deactivateTester(tester, type);
                               }
                               catch (Exception e) {
                                   // Deactivation is best-effort — the tester deployment may already be gone —
                                   // but record the failure instead of swallowing it silently, so operators can
                                   // distinguish expected no-ops from real errors. Run data is deleted regardless.
                                   log.log(INFO, "failed deactivating tester for " + id + ": " + e);
                               }
                               curator.deleteRunData(id, type);
                           }
                       });
                   logs.delete(id);
                   curator.deleteRunData(id);
               }
               catch (Exception e) {
                   log.log(WARNING, "failed cleaning up after deleted application", e);
               }
           });
}
/** Deactivates the tester application's deployment in the zone of the given job type. */
public void deactivateTester(TesterId id, JobType type) {
    DeploymentId testerDeployment = new DeploymentId(id.id(), type.zone());
    controller.serviceRegistry().configServer().deactivate(testerDeployment);
}
/**
 * Locks all runs and modifies the list of historic runs for the given application and job type.
 * The modified map is written back in full when the consumer returns.
 */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Mutex __ = curator.lock(id, type)) {
        // Mutable copy, since the consumer may add or remove entries.
        SortedMap<RunId, Run> history = new TreeMap<>(curator.readHistoricRuns(id, type));
        modifications.accept(history);
        curator.writeHistoricRuns(id, type, history.values());
    }
}
/**
 * Locks and modifies the run with the given id, provided it is still active.
 * A no-op when the run has already ended or is no longer the last run of its job.
 */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Mutex __ = curator.lock(id.application(), id.type())) {
        Optional<Run> run = active(id);
        if (run.isPresent())
            curator.writeLastRun(modifications.apply(run.get()));
    }
}
/**
 * Locks the given step and checks none of its prerequisites are running, then performs the given actions.
 * Prerequisite locks are acquired and immediately released: successfully taking a prerequisite's lock
 * proves no other thread is currently executing that step, which is all this check needs.
 * @throws TimeoutException if a lock cannot be acquired in time
 */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Mutex lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
// Intentionally empty body: acquire-and-release verifies the prerequisite is not held.
try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
No, that's fine. It was the version which started setting up the tester logservers. | public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.hasStep(copyVespaLogs))
return run;
ZoneId zone = id.type().zone();
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
Instant from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) {
deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.tester().id(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
Instant justNow = controller.clock().instant().minusSeconds(2);
log = Stream.concat(log.stream(), testerLog.stream())
.filter(entry -> entry.at().isBefore(justNow))
.sorted(comparing(LogEntry::at))
.collect(toUnmodifiableList());
}
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
} | if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) { | public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.hasStep(copyVespaLogs))
return run;
ZoneId zone = id.type().zone();
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
Instant from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (run.hasStep(installTester) && run.versions().targetPlatform().isAfter(new Version("7.589.14"))) {
deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.tester().id(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
Instant justNow = controller.clock().instant().minusSeconds(2);
log = Stream.concat(log.stream(), testerLog.stream())
.filter(entry -> entry.at().isBefore(justNow))
.sorted(comparing(LogEntry::at))
.collect(toUnmodifiableList());
}
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
} | class JobController {
public static final Duration maxHistoryAge = Duration.ofDays(60);
private static final Logger log = Logger.getLogger(JobController.class.getName());
private final int historyLength;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final JobMetrics metric;
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
/** Creates a job controller backed by the given controller's curator, log store, tester cloud and metrics. */
public JobController(Controller controller) {
// CD systems keep a longer run history than public systems.
this.historyLength = controller.system().isCd() ? 256 : 64;
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.metric = new JobMetrics(controller.metric(), controller::system);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : instances())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/**
 * Returns the logged entries for the given run, which are after the given id threshold,
 * or empty if the run is unknown. Active runs read from the live buffer; finished runs
 * read from the flushed store.
 */
public Optional<RunLog> details(RunId id, long after) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step, provided the run is still active. */
public void log(RunId id, Step step, List<LogEntry> entries) {
    locked(id, run -> {
        logs.append(id.application(), id.type(), step, entries);
        return run;  // Run itself is unchanged; locking only guards the append.
    });
}
/**
 * Stores the given log messages for the given run and step, at the given level.
 * Each message is stamped with the clock's current instant at conversion time.
 */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step, at the given level. */
public void log(RunId id, Step step, Level level, String message) {
// singletonList (not List.of) keeps tolerance for a null message — confirm before changing.
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
// Only fetch when the run is at a step that consumes tester output.
Optional<Step> step = Stream.of(endStagingSetup, endTests)
.filter(run.readySteps()::contains)
.findAny();
if (step.isEmpty())
return run;
// Continue from the last entry recorded on the run.
List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), step.get(), entries);
// Remember the highest entry id seen, so the next poll resumes after it.
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Fetches the test report from the tester, if present, and stores it for the given run. */
public void updateTestReport(RunId id) {
    locked(id, run -> {
        cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()))
             .ifPresent(report -> logs.writeTestReport(id, report));
        return run;  // The run record itself is unmodified.
    });
}
public Optional<String> getTestReports(RunId id) {
return logs.readTestReports(id);
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all instances of applications which have registered. */
public List<ApplicationId> instances() {
return controller.applications().readable().stream()
.flatMap(application -> application.instances().values().stream())
.map(Instance::id)
.collect(toUnmodifiableList());
}
/** Returns all job types which have been run for the given application. */
private List<JobType> jobs(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.filter(type -> last(id, type).isPresent())
.collect(toUnmodifiableList());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(JobId id) {
return runs(id.application(), id.type());
}
/** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
public List<Instant> jobStarts(JobId id) {
return runs(id).descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.map(Run::start)
.collect(toUnmodifiableList());
}
/** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
.findFirst()
.orElseGet(deployment::at);
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
Optional<Run> last = last(id, type);
curator.readHistoricRuns(id, type).forEach((runId, run) -> {
if (last.isEmpty() || ! runId.equals(last.get().id()))
runs.put(runId, run);
});
last.ifPresent(run -> runs.put(run.id(), run));
return runs.build();
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(JobId job) {
return curator.readLastRun(job.application(), job.type());
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the last completed of the given job. */
public Optional<Run> lastCompleted(JobId id) {
return JobStatus.lastCompleted(runs(id));
}
/** Returns the first failing of the given job. */
public Optional<Run> firstFailing(JobId id) {
return JobStatus.firstFailing(runs(id));
}
/** Returns the last success of the given job. */
public Optional<Run> lastSuccess(JobId id) {
return JobStatus.lastSuccess(runs(id));
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return controller.applications().idList().stream()
.flatMap(id -> active(id).stream())
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given application. */
public List<Run> active(TenantAndApplicationId id) {
return controller.applications().requireApplication(id).instances().keySet().stream()
.flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id.instance(name), type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given instance. */
public List<Run> active(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> !run.hasEnded())
.collect(toUnmodifiableList());
}
/** Returns the job status of the given job, possibly empty. */
public JobStatus jobStatus(JobId id) {
return new JobStatus(id, runs(id));
}
/** Returns the deployment status of the given application. */
public DeploymentStatus deploymentStatus(Application application) {
return deploymentStatus(application, controller.readSystemVersion());
}
private DeploymentStatus deploymentStatus(Application application, Version systemVersion) {
return new DeploymentStatus(application,
this::jobStatus,
controller.zoneRegistry(),
systemVersion,
instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
controller.clock().instant());
}
/** Adds deployment status to each of the given applications. */
public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) {
return DeploymentStatusList.from(applications.asList().stream()
.map(application -> deploymentStatus(application, systemVersion))
.collect(toUnmodifiableList()));
}
/** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
return deploymentStatuses(applications, controller.readSystemVersion());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Invoked when starting the step */
public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) {
locked(id, run -> run.with(timestamp, step));
}
/**
* Changes the status of the given run to inactive, and stores it as a historic run.
* Throws TimeoutException if some step in this job is still being run.
* Locks the report step and all its prerequisites (pushed on a deque so they are all
* closed in the finally block), then finishes the run and prunes old history entries.
*/
public void finish(RunId id) throws TimeoutException {
Deque<Mutex> locks = new ArrayDeque<>();
try {
Run unlockedRun = run(id).get();
locks.push(curator.lock(id.application(), id.type(), report));
for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
locks.push(curator.lock(id.application(), id.type(), step));
locked(id, run -> {
if (run.status() == reset) {
for (Step step : run.steps().keySet())
// NOTE(review): the string literal below appears truncated in this extract — restore from upstream.
log(id, step, INFO, List.of("
return run.reset();
}
// A running run with only succeeded steps falls through and is finished below.
if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
long successes = runs.values().stream().filter(Run::hasSucceeded).count();
var oldEntries = runs.entrySet().iterator();
// Prune runs that are beyond the history length or older than the max history age …
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
// … but always keep the only remaining recent success.
// NOTE(review): the extra oldEntries.next() here skips an element without the loop's
// age/length check; assumed intentional to step past the kept entry — confirm upstream.
if ( successes == 1
&& old.getValue().hasSucceeded()
&& ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
oldEntries.next();
continue;
}
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
metric.jobFinished(run.id().job(), finishedRun.status());
pruneRevisions(unlockedRun);
return finishedRun;
});
}
finally {
// Release every acquired step lock; failures are logged since a leaked lock requires a restart.
for (Mutex lock : locks) {
try {
lock.close();
} catch (Throwable t) {
log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
"have been released in ZooKeeper, and if not this controller " +
"must be restarted to release the lock", t);
}
}
}
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id, String reason) {
    locked(id, run -> {
        // Record the abort reason on every step that had not yet finished.
        for (var entry : run.stepStatuses().entrySet()) {
            if (entry.getValue() == unfinished)
                log(id, entry.getKey(), INFO, "Aborting run: " + reason);
        }
        return run.aborted();
    });
}
/**
 * Accepts and stores a new application package and test jar pair under a generated application version key.
 * Runs entirely under the application lock: stores package, meta and diff, prunes old packages, updates
 * test-package and parent-version notifications, and finally triggers deployment of the new revision.
 * A projectId of -1 clears the stored project id.
 */
public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
ApplicationController applications = controller.applications();
// The version is produced inside the lock but must be returned after it; hence the reference.
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
applications.lockApplicationOrThrow(id, application -> {
Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
.map(ApplicationPackage::new);
long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
version.set(submission.toApplicationVersion(1 + previousBuild));
// Diff against the previous stored package, or against the empty package when none exists.
byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
applications.applicationStore().put(id.tenant(),
id.application(),
version.get().id(),
submission.applicationPackage().zippedContent(),
submission.testPackage(),
diff);
applications.applicationStore().putMeta(id.tenant(),
id.application(),
controller.clock().instant(),
submission.applicationPackage().metaDataZip());
application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
application = application.withRevisions(revisions -> revisions.with(version.get()));
application = withPrunedPackages(application);
// Maintain the test-package notification according to validation results.
TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
if (testSummary.problems().isEmpty())
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
else
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.testPackage,
Notification.Level.warning,
testSummary.problems());
// Warn when the package was compiled against an older major than the current system.
submission.applicationPackage().parentVersion().ifPresent(parent -> {
if (parent.getMajor() < controller.readSystemVersion().getMajor())
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.submission,
Notification.Level.warning,
"Parent version used to compile the application is on a " +
"lower major version than the current Vespa Cloud version");
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
});
applications.storeWithUpdatedConfig(application, submission.applicationPackage());
applications.deploymentTrigger().triggerNewRevision(id);
});
return version.get();
}
private LockedApplication withPrunedPackages(LockedApplication application){
TenantAndApplicationId id = application.get().id();
Optional<RevisionId> oldestDeployed = application.get().oldestDeployedRevision();
if (oldestDeployed.isPresent()) {
controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed.get());
for (ApplicationVersion version : application.get().revisions().withPackage())
if (version.id().compareTo(oldestDeployed.get()) < 0)
application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
}
return application;
}
/** Forget revisions no longer present in any relevant job history. */
private void pruneRevisions(Run run) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
boolean isProduction = run.versions().targetRevision().isProduction();
(isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
: Stream.of(jobStatus(run.id().job())))
.flatMap(jobs -> jobs.runs().values().stream())
.map(r -> r.versions().targetRevision())
.filter(id -> id.isProduction() == isProduction)
.min(naturalOrder())
.ifPresent(oldestRevision -> {
controller.applications().lockApplicationOrThrow(applicationId, application -> {
if (isProduction) {
controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
}
else {
controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
}
});
});
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
if (revision.compileVersion()
.map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
.orElse(false))
throw new IllegalArgumentException("Will not start a job with incompatible platform version (" + versions.targetPlatform() + ") " +
"and compile versions (" + revision.compileVersion().get() + ")");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
metric.jobStarted(newId.job());
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
deploy(id, type, platform, applicationPackage, false);
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
if ( ! controller.zoneRegistry().hasZone(type.zone()))
throw new IllegalArgumentException(type.zone() + " is not present in this system");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
if ( ! application.get().instances().containsKey(id.instance()))
application = controller.applications().withNewInstance(application, id);
controller.applications().store(application);
});
DeploymentId deploymentId = new DeploymentId(id, type.zone());
Optional<Run> lastRun = last(id, type);
lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());
byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
start(id,
type,
new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
false,
dryRun ? JobProfile.developmentDryRun : JobProfile.development,
Optional.empty());
});
locked(id, type, __ -> {
runner.get().accept(last(id, type).get());
});
}
/** Returns the package diff against the previous version, or against an empty package if the previous version does not exist or cannot be read. */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
return lastRun.map(run -> run.versions().targetRevision())
.map(prevVersion -> {
ApplicationPackage previous;
try {
previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
} catch (IllegalArgumentException e) {
// The stored previous package may be invalid; fall back to a diff against nothing.
return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
}
return ApplicationPackageDiff.diff(previous, applicationPackage);
})
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
}
/**
 * Returns the platform version to deploy on: the last compatible version on the major pinned in
 * deployment.xml, if any; otherwise a candidate version compatible with the package's compile version.
 *
 * @throws IllegalArgumentException if no suitable version exists
 */
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
if (major.isPresent())
return controller.applications().lastCompatibleVersion(major.get())
.orElseThrow(() -> new IllegalArgumentException("major " + major.get() + " specified in deployment.xml, " +
"but no version on this major was found"));
VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
// Candidates are the deployable versions, plus the version already deployed in this zone, if any.
List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
.map(VespaVersion::versionNumber)
.collect(toList());
instance.map(Instance::deployments)
.map(deployments -> deployments.get(id.zoneId()))
.map(Deployment::version)
.ifPresent(versions::add);
// Iterate candidates in reverse order, returning the first one compatible with the compile version.
for (Version target : reversed(versions))
if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
return target;
throw new IllegalArgumentException("no suitable platform version found" +
applicationPackage.compileVersion()
.map(version -> " for package compiled against " + version)
.orElse(""));
}
/** Aborts a run and waits for it to complete. */
private void abortAndWait(RunId id) {
abort(id, "replaced by new deployment");
runner.get().accept(last(id.application(), id.type()).get());
// Poll until the aborted run has flushed its final state; the runner invocation above advances it.
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
// Restore the interrupt flag before bailing out.
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Mutex ___ = curator.lock(id, type)) {
try {
deactivateTester(tester, type);
}
catch (Exception e) {
// Best effort: the tester deployment may already be gone.
// Record why at FINE, rather than swallowing silently, to aid debugging.
log.log(Level.FINE, "failed deactivating tester for deleted application " + id, e);
}
curator.deleteRunData(id, type);
}
});
logs.delete(id);
curator.deleteRunData(id);
}
catch (Exception e) {
log.log(WARNING, "failed cleaning up after deleted application", e);
}
});
}
/** Deactivates the tester deployment for the given tester and job type's zone. */
public void deactivateTester(TesterId id, JobType type) {
DeploymentId testerDeployment = new DeploymentId(id.id(), type.zone());
controller.serviceRegistry().configServer().deactivate(testerDeployment);
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Mutex __ = curator.lock(id, type)) {
// Read-modify-write under the job lock, so concurrent writers cannot lose updates.
SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
// No-op when the run is no longer active; otherwise store the modified run.
active(id).map(modifications).ifPresent(curator::writeLastRun);
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Mutex lock = curator.lock(id, type, step)) {
// Acquiring and immediately releasing each prerequisite's lock proves none of them is currently running.
for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
// Maximum age of a historic run before it becomes eligible for pruning.
public static final Duration maxHistoryAge = Duration.ofDays(60);
private static final Logger log = Logger.getLogger(JobController.class.getName());
// Number of historic runs kept per job; larger in CD systems.
private final int historyLength;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final JobMetrics metric;
// Hook invoked with a run that should be processed; a no-op until setRunner is called.
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller) {
this.historyLength = controller.system().isCd() ? 256 : 64;
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.metric = new JobMetrics(controller.metric(), controller::system);
}
/** Returns the tester cloud used for running tests. */
public TesterCloud cloud() { return cloud; }
/** Returns the number of historic runs kept per job. */
public int historyLength() { return historyLength; }
/** Sets the hook which is invoked with runs that should be processed. */
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : instances())
for (JobType type : jobs(id)) {
// Re-writing the last run under the job lock migrates it to the current serialization format.
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
// Active runs read from the live buffered store; finished runs from the archived one.
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step, stamped with the current time and the given level. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
// Only the staging-setup and test steps produce tester log output.
Optional<Step> step = Stream.of(endStagingSetup, endTests)
.filter(run.readySteps()::contains)
.findAny();
if (step.isEmpty())
return run;
List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), step.get(), entries);
// Remember the highest entry id seen, so the next poll continues from there.
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Fetches the test report from the tester, if available, and stores it for the given run. */
public void updateTestReport(RunId id) {
locked(id, run -> {
Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
if (report.isEmpty()) {
return run;
}
logs.writeTestReport(id, report.get());
return run;
});
}
/** Returns the stored test reports for the given run, if any. */
public Optional<String> getTestReports(RunId id) {
return logs.readTestReports(id);
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all instances of applications which have registered. */
public List<ApplicationId> instances() {
return controller.applications().readable().stream()
.flatMap(application -> application.instances().values().stream())
.map(Instance::id)
.collect(toUnmodifiableList());
}
/** Returns all job types which have been run for the given application. */
private List<JobType> jobs(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.filter(type -> last(id, type).isPresent())
.collect(toUnmodifiableList());
}
/** Returns an immutable map of all known runs for the given job. */
public NavigableMap<RunId, Run> runs(JobId id) {
return runs(id.application(), id.type());
}
/** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
public List<Instant> jobStarts(JobId id) {
return runs(id).descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.map(Run::start)
.collect(toUnmodifiableList());
}
/** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
.findFirst()
.orElseGet(deployment::at);
}
/** Returns an immutable map of all known runs for the given application and job type, ordered by run number. */
public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
Optional<Run> last = last(id, type);
// The historic runs may contain a stale copy of the last run; prefer the authoritative last-run record.
curator.readHistoricRuns(id, type).forEach((runId, run) -> {
if (last.isEmpty() || ! runId.equals(last.get().id()))
runs.put(runId, run);
});
last.ifPresent(run -> runs.put(run.id(), run));
return runs.build();
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given job, if one has been run. */
public Optional<Run> last(JobId job) {
return curator.readLastRun(job.application(), job.type());
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the last completed run of the given job. */
public Optional<Run> lastCompleted(JobId id) {
return JobStatus.lastCompleted(runs(id));
}
/** Returns the first failing run of the given job. */
public Optional<Run> firstFailing(JobId id) {
return JobStatus.firstFailing(runs(id));
}
/** Returns the last successful run of the given job. */
public Optional<Run> lastSuccess(JobId id) {
return JobStatus.lastSuccess(runs(id));
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return controller.applications().idList().stream()
.flatMap(id -> active(id).stream())
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given application. */
public List<Run> active(TenantAndApplicationId id) {
return controller.applications().requireApplication(id).instances().keySet().stream()
.flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id.instance(name), type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.collect(toUnmodifiableList());
}
/** Returns a list of all active runs for the given instance. */
public List<Run> active(ApplicationId id) {
return JobType.allIn(controller.zoneRegistry()).stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> !run.hasEnded())
.collect(toUnmodifiableList());
}
/** Returns the job status of the given job, possibly empty. */
public JobStatus jobStatus(JobId id) {
return new JobStatus(id, runs(id));
}
/** Returns the deployment status of the given application, using the read system version. */
public DeploymentStatus deploymentStatus(Application application) {
return deploymentStatus(application, controller.readSystemVersion());
}
/** Returns the deployment status of the given application, against the given system version. */
private DeploymentStatus deploymentStatus(Application application, Version systemVersion) {
return new DeploymentStatus(application,
this::jobStatus,
controller.zoneRegistry(),
systemVersion,
instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
controller.clock().instant());
}
/** Adds deployment status to each of the given applications. */
public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) {
return DeploymentStatusList.from(applications.asList().stream()
.map(application -> deploymentStatus(application, systemVersion))
.collect(toUnmodifiableList()));
}
/** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
return deploymentStatuses(applications, controller.readSystemVersion());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Records the start timestamp of the given step; invoked when starting the step. */
public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) {
locked(id, run -> run.with(timestamp, step));
}
/**
 * Changes the status of the given run to inactive, and stores it as a historic run.
 * Throws TimeoutException if some step in this job is still being run.
 */
public void finish(RunId id) throws TimeoutException {
// All step locks taken here are released in the finally block below.
Deque<Mutex> locks = new ArrayDeque<>();
try {
// Ensure the run itself and all prerequisites of the report step are not being run.
Run unlockedRun = run(id).get();
locks.push(curator.lock(id.application(), id.type(), report));
for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
locks.push(curator.lock(id.application(), id.type(), step));
locked(id, run -> {
if (run.status() == reset) {
for (Step step : run.steps().keySet())
// NOTE(review): the log message literal below appears truncated in this copy of the file — confirm against upstream.
log(id, step, INFO, List.of("
return run.reset();
}
if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
long successes = runs.values().stream().filter(Run::hasSucceeded).count();
var oldEntries = runs.entrySet().iterator();
// Prune runs past the history length or older than the max history age,
// but keep a lone success which is still within the max age.
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
if ( successes == 1
&& old.getValue().hasSucceeded()
&& ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
// NOTE(review): this next() plus the loop's update expression advances two entries here,
// skipping the entry right after the kept success — verify the double advance is intended.
oldEntries.next();
continue;
}
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
metric.jobFinished(run.id().job(), finishedRun.status());
pruneRevisions(unlockedRun);
return finishedRun;
});
}
finally {
for (Mutex lock : locks) {
try {
lock.close();
} catch (Throwable t) {
log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
"have been released in ZooKeeper, and if not this controller " +
"must be restarted to release the lock", t);
}
}
}
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id, String reason) {
locked(id, run -> {
// Log the abort reason on every step which had not yet finished.
run.stepStatuses().entrySet().stream()
.filter(entry -> entry.getValue() == unfinished)
.forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
return run.aborted();
});
}
/**
 * Accepts and stores a new application package and test jar pair under a generated application version key,
 * and triggers deployment of the new revision.
 *
 * @param id the application the submission is for
 * @param submission the package and test artifacts, with metadata
 * @param projectId the CI project id, or -1 if none
 * @return the stored application version
 */
public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
ApplicationController applications = controller.applications();
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
applications.lockApplicationOrThrow(id, application -> {
// Build numbers continue from the previous revision, starting at 1.
Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
.map(ApplicationPackage::new);
long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
version.set(submission.toApplicationVersion(1 + previousBuild));
// Store the package, its diff against the previous package, and its meta data.
byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
applications.applicationStore().put(id.tenant(),
id.application(),
version.get().id(),
submission.applicationPackage().zippedContent(),
submission.testPackage(),
diff);
applications.applicationStore().putMeta(id.tenant(),
id.application(),
controller.clock().instant(),
submission.applicationPackage().metaDataZip());
application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
application = application.withRevisions(revisions -> revisions.with(version.get()));
application = withPrunedPackages(application);
// Raise or clear notifications about the test package and the compile (parent) version.
TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
if (testSummary.problems().isEmpty())
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
else
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.testPackage,
Notification.Level.warning,
testSummary.problems());
submission.applicationPackage().parentVersion().ifPresent(parent -> {
if (parent.getMajor() < controller.readSystemVersion().getMajor())
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.submission,
Notification.Level.warning,
"Parent version used to compile the application is on a " +
"lower major version than the current Vespa Cloud version");
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
});
applications.storeWithUpdatedConfig(application, submission.applicationPackage());
applications.deploymentTrigger().triggerNewRevision(id);
});
return version.get();
}
/** Drops stored packages for revisions older than the oldest deployed revision, and returns the updated application. */
private LockedApplication withPrunedPackages(LockedApplication application) {
TenantAndApplicationId id = application.get().id();
Optional<RevisionId> oldestDeployed = application.get().oldestDeployedRevision();
if (oldestDeployed.isEmpty())
return application;
RevisionId oldest = oldestDeployed.get();
controller.applications().applicationStore().prune(id.tenant(), id.application(), oldest);
for (ApplicationVersion version : application.get().revisions().withPackage())
if (version.id().compareTo(oldest) < 0)
application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
return application;
}
/** Forget revisions no longer present in any relevant job history. */
private void pruneRevisions(Run run) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
boolean isProduction = run.versions().targetRevision().isProduction();
// Find the oldest still-referenced revision: across all jobs for production revisions, or just this job for dev ones.
(isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
: Stream.of(jobStatus(run.id().job())))
.flatMap(jobs -> jobs.runs().values().stream())
.map(r -> r.versions().targetRevision())
.filter(id -> id.isProduction() == isProduction)
.min(naturalOrder())
.ifPresent(oldestRevision -> {
controller.applications().lockApplicationOrThrow(applicationId, application -> {
if (isProduction) {
controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
}
else {
controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
}
});
});
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/** Orders a run of the given type with the given profile, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
// Refuse to start a run whose platform is incompatible with the revision's compile version.
if (revision.compileVersion()
.map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
.orElse(false))
throw new IllegalArgumentException("Will not start a job with incompatible platform version (" + versions.targetPlatform() + ") " +
"and compile versions (" + revision.compileVersion().get() + ")");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");
// Run numbers are sequential per job, starting at 1.
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
metric.jobStarted(newId.job());
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
deploy(id, type, platform, applicationPackage, false);
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
if ( ! controller.zoneRegistry().hasZone(type.zone()))
throw new IllegalArgumentException(type.zone() + " is not present in this system");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
if ( ! application.get().instances().containsKey(id.instance()))
application = controller.applications().withNewInstance(application, id);
controller.applications().store(application);
});
DeploymentId deploymentId = new DeploymentId(id, type.zone());
Optional<Run> lastRun = last(id, type);
lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());
byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
start(id,
type,
new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
false,
dryRun ? JobProfile.developmentDryRun : JobProfile.development,
Optional.empty());
});
locked(id, type, __ -> {
runner.get().accept(last(id, type).get());
});
}
/** Application package diff against the previous version, or against an empty package if the previous version does not exist or is invalid. */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
return lastRun.map(run -> run.versions().targetRevision())
.map(prevVersion -> {
ApplicationPackage previous;
try {
previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
} catch (IllegalArgumentException e) {
return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
}
return ApplicationPackageDiff.diff(previous, applicationPackage);
})
.orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
}
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
if (major.isPresent())
return controller.applications().lastCompatibleVersion(major.get())
.orElseThrow(() -> new IllegalArgumentException("major " + major.get() + " specified in deployment.xml, " +
"but no version on this major was found"));
VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
.map(VespaVersion::versionNumber)
.collect(toList());
instance.map(Instance::deployments)
.map(deployments -> deployments.get(id.zoneId()))
.map(Deployment::version)
.ifPresent(versions::add);
for (Version target : reversed(versions))
if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
return target;
throw new IllegalArgumentException("no suitable platform version found" +
applicationPackage.compileVersion()
.map(version -> " for package compiled against " + version)
.orElse(""));
}
/** Aborts a run and waits for it to complete. */
private void abortAndWait(RunId id) {
abort(id, "replaced by new deployment");
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Mutex ___ = curator.lock(id, type)) {
try {
deactivateTester(tester, type);
}
catch (Exception e) {
}
curator.deleteRunData(id, type);
}
});
logs.delete(id);
curator.deleteRunData(id);
}
catch (Exception e) {
log.log(WARNING, "failed cleaning up after deleted application", e);
}
});
}
public void deactivateTester(TesterId id, JobType type) {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Mutex __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Mutex __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
curator.writeLastRun(modifications.apply(run));
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Mutex lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
Hmm ... so should we try to set a compile version for non-java projects as well, then? Assign the controller version at the time of submission, or use the one from deployment.xml if set? | private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (applicationPackage.compileVersion().isEmpty()) continue;
if (deprecatedElement.majorVersion() >= applicationPackage.compileVersion().get().getMajor()) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
} | if (applicationPackage.compileVersion().isEmpty()) continue; | private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
.or(() -> controller.readVersionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.map(Version::getMajor))
.orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (applicationPackage.compileVersion().isEmpty()) continue;
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
} | class ApplicationPackageValidator {
private final Controller controller;
private final ListFlag<String> cloudAccountsFlag;
/** Creates a validator backed by the given controller, which supplies zone and feature-flag data. */
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.cloudAccountsFlag = PermanentFlags.CLOUD_ACCOUNTS.bindTo(controller.flagSource());
}
/**
 * Validate the given application package
 *
 * @param application the application owning the package
 * @param applicationPackage the package to validate
 * @param instant the instant at which validation happens
 * @throws IllegalArgumentException if any validations fail
 */
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateCloudAccounts(application, applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
// Nothing to check outside public systems, or for packages without deployment steps.
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
// Building the job list exercises validation of the instance's steps.
new DeploymentSteps(spec, controller.zoneRegistry()).jobs();
spec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElseThrow()))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
}
/** Verify that no single endpoint contains regions in different clouds */
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
for (var endpoint : instance.endpoints()) {
var clouds = new HashSet<CloudName>();
for (var region : endpoint.regions()) {
for (ZoneApi zone : controller.zoneRegistry().zones().all().in(Environment.prod).in(region).zones()) {
clouds.add(zone.getCloudName());
}
}
if (clouds.size() != 1) {
throw new IllegalArgumentException("Endpoint '" + endpoint.endpointId() + "' in " + instance +
" cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().collect(Collectors.toList()));
}
}
}
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Verify that declared cloud accounts are allowed to be used by the tenant */
private void validateCloudAccounts(Application application, DeploymentSpec deploymentSpec) {
TenantName tenant = application.id().tenant();
Set<CloudAccount> validAccounts = cloudAccountsFlag.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value().stream()
.map(CloudAccount::new)
.collect(Collectors.toSet());
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
if (!zone.environment().isProduction()) continue;
Optional<CloudAccount> cloudAccount = spec.cloudAccount(zone.environment(), zone.region().get());
if (cloudAccount.isEmpty()) continue;
if (validAccounts.contains(cloudAccount.get())) continue;
throw new IllegalArgumentException("Cloud account '" + cloudAccount.get().value() +
"' is not valid for tenant '" + tenant + "'");
}
}
}
/** Returns whether newEndpoints contains all destinations in endpoints */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
var containsAllRegions = true;
var hasSameCluster = true;
for (var endpoint : endpoints) {
var endpointContainsAllRegions = false;
var endpointHasSameCluster = false;
for (var newEndpoint : newEndpoints) {
if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
}
}
containsAllRegions &= endpointContainsAllRegions;
hasSameCluster &= endpointHasSameCluster;
}
return containsAllRegions && hasSameCluster;
}
/** Returns all configured endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
private final ListFlag<String> cloudAccountsFlag;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.cloudAccountsFlag = PermanentFlags.CLOUD_ACCOUNTS.bindTo(controller.flagSource());
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateCloudAccounts(application, applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than wanted major version */
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
new DeploymentSteps(spec, controller.zoneRegistry()).jobs();
spec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElseThrow()))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
}
/** Verify that no single endpoint contains regions in different clouds */
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
for (var endpoint : instance.endpoints()) {
var clouds = new HashSet<CloudName>();
for (var region : endpoint.regions()) {
for (ZoneApi zone : controller.zoneRegistry().zones().all().in(Environment.prod).in(region).zones()) {
clouds.add(zone.getCloudName());
}
}
if (clouds.size() != 1) {
throw new IllegalArgumentException("Endpoint '" + endpoint.endpointId() + "' in " + instance +
" cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().collect(Collectors.toList()));
}
}
}
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Verify that declared cloud accounts are allowed to be used by the tenant */
private void validateCloudAccounts(Application application, DeploymentSpec deploymentSpec) {
TenantName tenant = application.id().tenant();
Set<CloudAccount> validAccounts = cloudAccountsFlag.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value().stream()
.map(CloudAccount::new)
.collect(Collectors.toSet());
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
if (!zone.environment().isProduction()) continue;
Optional<CloudAccount> cloudAccount = spec.cloudAccount(zone.environment(), zone.region().get());
if (cloudAccount.isEmpty()) continue;
if (validAccounts.contains(cloudAccount.get())) continue;
throw new IllegalArgumentException("Cloud account '" + cloudAccount.get().value() +
"' is not valid for tenant '" + tenant + "'");
}
}
}
/** Returns whether newEndpoints contains all destinations in endpoints */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
var containsAllRegions = true;
var hasSameCluster = true;
for (var endpoint : endpoints) {
var endpointContainsAllRegions = false;
var endpointHasSameCluster = false;
for (var newEndpoint : newEndpoints) {
if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
}
}
containsAllRegions &= endpointContainsAllRegions;
hasSameCluster &= endpointHasSameCluster;
}
return containsAllRegions && hasSameCluster;
}
/** Returns all configured endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} |
Yes, that makes sense. I'll fix. | private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (applicationPackage.compileVersion().isEmpty()) continue;
if (deprecatedElement.majorVersion() >= applicationPackage.compileVersion().get().getMajor()) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
} | if (applicationPackage.compileVersion().isEmpty()) continue; | private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
.or(() -> controller.readVersionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.map(Version::getMajor))
.orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (applicationPackage.compileVersion().isEmpty()) continue;
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
} | class ApplicationPackageValidator {
private final Controller controller;
private final ListFlag<String> cloudAccountsFlag;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.cloudAccountsFlag = PermanentFlags.CLOUD_ACCOUNTS.bindTo(controller.flagSource());
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateCloudAccounts(application, applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than compile version */
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
new DeploymentSteps(spec, controller.zoneRegistry()).jobs();
spec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElseThrow()))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
}
/** Verify that no single endpoint contains regions in different clouds */
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
for (var endpoint : instance.endpoints()) {
var clouds = new HashSet<CloudName>();
for (var region : endpoint.regions()) {
for (ZoneApi zone : controller.zoneRegistry().zones().all().in(Environment.prod).in(region).zones()) {
clouds.add(zone.getCloudName());
}
}
if (clouds.size() != 1) {
throw new IllegalArgumentException("Endpoint '" + endpoint.endpointId() + "' in " + instance +
" cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().collect(Collectors.toList()));
}
}
}
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Verify that declared cloud accounts are allowed to be used by the tenant */
private void validateCloudAccounts(Application application, DeploymentSpec deploymentSpec) {
TenantName tenant = application.id().tenant();
Set<CloudAccount> validAccounts = cloudAccountsFlag.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value().stream()
.map(CloudAccount::new)
.collect(Collectors.toSet());
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
if (!zone.environment().isProduction()) continue;
Optional<CloudAccount> cloudAccount = spec.cloudAccount(zone.environment(), zone.region().get());
if (cloudAccount.isEmpty()) continue;
if (validAccounts.contains(cloudAccount.get())) continue;
throw new IllegalArgumentException("Cloud account '" + cloudAccount.get().value() +
"' is not valid for tenant '" + tenant + "'");
}
}
}
/** Returns whether newEndpoints contains all destinations in endpoints */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
var containsAllRegions = true;
var hasSameCluster = true;
for (var endpoint : endpoints) {
var endpointContainsAllRegions = false;
var endpointHasSameCluster = false;
for (var newEndpoint : newEndpoints) {
if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
}
}
containsAllRegions &= endpointContainsAllRegions;
hasSameCluster &= endpointHasSameCluster;
}
return containsAllRegions && hasSameCluster;
}
/** Returns all configured endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
private final ListFlag<String> cloudAccountsFlag;
/** Creates a validator backed by the given controller, binding the tenant cloud-account allow-list flag to its flag source. */
public ApplicationPackageValidator(Controller controller) {
    Objects.requireNonNull(controller, "controller must be non-null");
    this.controller = controller;
    this.cloudAccountsFlag = PermanentFlags.CLOUD_ACCOUNTS.bindTo(controller.flagSource());
}
/**
 * Validate the given application package against this system.
 * Runs, in order: production-zone/step validation, cloud-account validation,
 * endpoint region validation, endpoint change validation, compacted-endpoint
 * clash validation, security/clients.pem presence (public systems only), and
 * deprecated deployment.xml element validation.
 *
 * @param application the existing application this package belongs to
 * @param applicationPackage the application package to validate
 * @param instant the current time, used when evaluating validation overrides
 * @throws IllegalArgumentException if any validations fail
 */
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
    validateSteps(applicationPackage.deploymentSpec());
    validateCloudAccounts(application, applicationPackage.deploymentSpec());
    validateEndpointRegions(applicationPackage.deploymentSpec());
    validateEndpointChange(application, applicationPackage, instant);
    validateCompactedEndpoint(applicationPackage);
    validateSecurityClientsPem(applicationPackage);
    validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than wanted major version */
/** Verifies that the package contains security/clients.pem, which public systems require for packages with deployment steps. */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
    boolean clientsPemRequired = controller.system().isPublic()
                                 && ! applicationPackage.deploymentSpec().steps().isEmpty();
    if (clientsPemRequired && applicationPackage.trustedCertificates().isEmpty()) {
        throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
    }
}
/** Verifies that each of the production zones listed in the deployment spec exists in this system. */
private void validateSteps(DeploymentSpec deploymentSpec) {
    for (var spec : deploymentSpec.instances()) {
        new DeploymentSteps(spec, controller.zoneRegistry()).jobs(); // throws if a step cannot be mapped to a job
        for (var zone : spec.zones()) {
            if (zone.environment() != Environment.prod) continue;
            if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                 zone.region().orElseThrow()))) {
                throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
            }
        }
    }
}
/** Verifies that no single endpoint routes to regions in more than one cloud. */
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
    for (var instance : deploymentSpec.instances()) {
        for (var endpoint : instance.endpoints()) {
            Set<CloudName> clouds = new HashSet<>();
            for (var region : endpoint.regions()) {
                controller.zoneRegistry().zones().all().in(Environment.prod).in(region).zones()
                          .forEach(zone -> clouds.add(zone.getCloudName()));
            }
            if (clouds.size() == 1) continue; // all regions of this endpoint are in one cloud
            throw new IllegalArgumentException("Endpoint '" + endpoint.endpointId() + "' in " + instance +
                                               " cannot contain regions in different clouds: " +
                                               endpoint.regions().stream().sorted().collect(Collectors.toList()));
        }
    }
}
/** Verifies the endpoint configuration of every instance in the given application package. */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
    for (var instance : applicationPackage.deploymentSpec().instances()) {
        validateEndpointChange(application, instance.name(), applicationPackage, instant);
    }
}
/**
 * Verify that compactable endpoint parts (instance name and endpoint ID) do not clash:
 * two endpoints whose non-default parts compact to the same key would produce the same name.
 *
 * @throws IllegalArgumentException if two endpoints compact to the same identifier
 */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
    Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
    for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
        for (var endpoint : instanceSpec.endpoints()) {
            List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
            InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
            // putIfAbsent both registers this endpoint and detects a clash in a single map operation
            InstanceEndpoint existingEndpoint = instanceEndpoints.putIfAbsent(nonCompactableIds, instanceEndpoint);
            if (existingEndpoint != null) {
                throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
                                                   + instanceSpec.name().value() +
                                                   "' clashes with endpoint '" + existingEndpoint.endpointId +
                                                   "' in instance '" + existingEndpoint.instance + "'");
            }
        }
    }
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
    var validationId = ValidationId.globalEndpointChange;
    // An unexpired validation override in the package explicitly allows the change
    if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
    // Endpoints of the currently deployed spec for this instance, or none if the instance is new
    var endpoints = application.deploymentSpec().instance(instanceName)
                               .map(ApplicationPackageValidator::allEndpointsOf)
                               .orElseGet(List::of);
    var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
    // OK if nothing is removed ...
    if (newEndpoints.containsAll(endpoints)) return;
    // ... or if every existing destination (regions + cluster, per endpoint ID) is still served
    if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
    // Otherwise the change is destructive: report what would be removed (and added), and how to override
    var removedEndpoints = new ArrayList<>(endpoints);
    removedEndpoints.removeAll(newEndpoints);
    newEndpoints.removeAll(endpoints);
    throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
                                       (instanceName.isDefault() ? "" : "." + instanceName.value()) +
                                       "' has endpoints " + endpoints +
                                       ", but does not include all of these in deployment.xml. Deploying given " +
                                       "deployment.xml will remove " + removedEndpoints +
                                       (newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
                                       ". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Verifies that every cloud account declared for a production zone is allowed for the application's tenant. */
private void validateCloudAccounts(Application application, DeploymentSpec deploymentSpec) {
    TenantName tenant = application.id().tenant();
    // Accounts permitted for this tenant, from the feature flag
    Set<CloudAccount> validAccounts = new HashSet<>();
    for (String account : cloudAccountsFlag.with(FetchVector.Dimension.TENANT_ID, tenant.value()).value()) {
        validAccounts.add(new CloudAccount(account));
    }
    for (var spec : deploymentSpec.instances()) {
        for (var zone : spec.zones()) {
            if ( ! zone.environment().isProduction()) continue;
            Optional<CloudAccount> cloudAccount = spec.cloudAccount(zone.environment(), zone.region().get());
            if (cloudAccount.isEmpty() || validAccounts.contains(cloudAccount.get())) continue;
            throw new IllegalArgumentException("Cloud account '" + cloudAccount.get().value() +
                                               "' is not valid for tenant '" + tenant + "'");
        }
    }
}
/**
 * Returns whether newEndpoints contains all destinations in endpoints: for each existing
 * endpoint there must be a new endpoint with the same ID that covers all of its regions
 * and targets the same container cluster.
 *
 * @param endpoints    the currently configured endpoints
 * @param newEndpoints the endpoints declared by the new application package
 */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
    for (var endpoint : endpoints) {
        var containsAllRegions = false;
        var hasSameCluster = false;
        // Scan all candidates: if several share the ID, the last match decides (preserves previous behavior)
        for (var newEndpoint : newEndpoints) {
            if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
                containsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
                hasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
            }
        }
        // Early exit: once one endpoint fails, the overall result can no longer be true
        if ( ! containsAllRegions || ! hasSameCluster) return false;
    }
    return true;
}
/** Returns all configued endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} |
`search-definitions`? Shouldn't it be either `schemas` or `searchdefinitions` (if there is some backwards compatibility issue stopping us from using `schemas`). Or maybe it's not used at all in controller, if so it can be removed. | public ApplicationPackage build() {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.setLevel(Deflater.NO_COMPRESSION);
writeZipEntry(out, "deployment.xml", deploymentSpec());
writeZipEntry(out, "validation-overrides.xml", validationOverrides());
writeZipEntry(out, "search-definitions/test.sd", searchDefinition());
writeZipEntry(out, "build-meta.json", buildMeta(compileVersion));
if (!trustedCertificates.isEmpty()) {
writeZipEntry(out, "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
} | writeZipEntry(out, "search-definitions/test.sd", searchDefinition()); | public ApplicationPackage build() {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.setLevel(Deflater.NO_COMPRESSION);
writeZipEntry(out, "deployment.xml", deploymentSpec());
writeZipEntry(out, "validation-overrides.xml", validationOverrides());
writeZipEntry(out, "search-definitions/test.sd", searchDefinition());
writeZipEntry(out, "build-meta.json", buildMeta(compileVersion));
if (!trustedCertificates.isEmpty()) {
writeZipEntry(out, "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
} | class ApplicationPackageBuilder {
private final StringBuilder prodBody = new StringBuilder();
private final StringBuilder validationOverridesBody = new StringBuilder();
private final StringBuilder blockChange = new StringBuilder();
private final StringJoiner notifications = new StringJoiner("/>\n <email ",
"<notifications>\n <email ",
"/>\n</notifications>\n").setEmptyValue("");
private final StringBuilder endpointsBody = new StringBuilder();
private final StringBuilder applicationEndpointsBody = new StringBuilder();
private final List<X509Certificate> trustedCertificates = new ArrayList<>();
private OptionalInt majorVersion = OptionalInt.empty();
private String instances = "default";
private String upgradePolicy = null;
private String revisionTarget = "latest";
private String revisionChange = "always";
private String upgradeRollout = null;
private String globalServiceId = null;
private String athenzIdentityAttributes = "athenz-domain='domain' athenz-service='service'";
private String searchDefinition = "search test { }";
private boolean explicitSystemTest = false;
private boolean explicitStagingTest = false;
private Version compileVersion = Version.fromString("6.1");
private String cloudAccount = null;
public ApplicationPackageBuilder majorVersion(int majorVersion) {
this.majorVersion = OptionalInt.of(majorVersion);
return this;
}
public ApplicationPackageBuilder instances(String instances) {
this.instances = instances;
return this;
}
public ApplicationPackageBuilder upgradePolicy(String upgradePolicy) {
this.upgradePolicy = upgradePolicy;
return this;
}
public ApplicationPackageBuilder revisionTarget(String revisionTarget) {
this.revisionTarget = revisionTarget;
return this;
}
public ApplicationPackageBuilder revisionChange(String revisionChange) {
this.revisionChange = revisionChange;
return this;
}
public ApplicationPackageBuilder upgradeRollout(String upgradeRollout) {
this.upgradeRollout = upgradeRollout;
return this;
}
public ApplicationPackageBuilder globalServiceId(String globalServiceId) {
this.globalServiceId = globalServiceId;
return this;
}
public ApplicationPackageBuilder endpoint(String id, String containerId, String... regions) {
endpointsBody.append(" <endpoint");
endpointsBody.append(" id='").append(id).append("'");
endpointsBody.append(" container-id='").append(containerId).append("'");
endpointsBody.append(">\n");
for (var region : regions) {
endpointsBody.append(" <region>").append(region).append("</region>\n");
}
endpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder applicationEndpoint(String id, String containerId, String region,
Map<InstanceName, Integer> instanceWeights) {
if (instanceWeights.isEmpty()) throw new IllegalArgumentException("At least one instance must be given");
applicationEndpointsBody.append(" <endpoint");
applicationEndpointsBody.append(" id='").append(id).append("'");
applicationEndpointsBody.append(" container-id='").append(containerId).append("'");
applicationEndpointsBody.append(" region='").append(region).append("'");
applicationEndpointsBody.append(">\n");
for (var kv : new TreeMap<>(instanceWeights).entrySet()) {
applicationEndpointsBody.append(" <instance weight='").append(kv.getValue().toString()).append("'>")
.append(kv.getKey().value())
.append("</instance>\n");
}
applicationEndpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder systemTest() {
explicitSystemTest = true;
return this;
}
public ApplicationPackageBuilder stagingTest() {
explicitStagingTest = true;
return this;
}
public ApplicationPackageBuilder region(RegionName regionName) {
return region(regionName, true);
}
public ApplicationPackageBuilder region(String regionName) {
prodBody.append(" <region>")
.append(regionName)
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder region(RegionName regionName, boolean active) {
prodBody.append(" <region active='")
.append(active)
.append("'>")
.append(regionName.value())
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder test(String regionName) {
prodBody.append(" <test>");
prodBody.append(regionName);
prodBody.append("</test>\n");
return this;
}
public ApplicationPackageBuilder parallel(String... regionName) {
prodBody.append(" <parallel>\n");
Arrays.stream(regionName).forEach(this::region);
prodBody.append(" </parallel>\n");
return this;
}
public ApplicationPackageBuilder delay(Duration delay) {
prodBody.append(" <delay seconds='");
prodBody.append(delay.getSeconds());
prodBody.append("'/>\n");
return this;
}
public ApplicationPackageBuilder blockChange(boolean revision, boolean version, String daySpec, String hourSpec,
String zoneSpec) {
blockChange.append(" <block-change");
blockChange.append(" revision='").append(revision).append("'");
blockChange.append(" version='").append(version).append("'");
blockChange.append(" days='").append(daySpec).append("'");
blockChange.append(" hours='").append(hourSpec).append("'");
blockChange.append(" time-zone='").append(zoneSpec).append("'");
blockChange.append("/>\n");
return this;
}
public ApplicationPackageBuilder allow(ValidationId validationId) {
validationOverridesBody.append(" <allow until='");
validationOverridesBody.append(asIso8601Date(Instant.now().plus(Duration.ofDays(28))));
validationOverridesBody.append("'>");
validationOverridesBody.append(validationId.value());
validationOverridesBody.append("</allow>\n");
return this;
}
public ApplicationPackageBuilder compileVersion(Version version) {
compileVersion = version;
return this;
}
public ApplicationPackageBuilder athenzIdentity(AthenzDomain domain, AthenzService service) {
this.athenzIdentityAttributes = Text.format("athenz-domain='%s' athenz-service='%s'", domain.value(),
service.value());
return this;
}
public ApplicationPackageBuilder withoutAthenzIdentity() {
this.athenzIdentityAttributes = null;
return this;
}
public ApplicationPackageBuilder emailRole(String role) {
this.notifications.add("role=\"" + role + "\"");
return this;
}
public ApplicationPackageBuilder emailAddress(String address) {
this.notifications.add("address=\"" + address + "\"");
return this;
}
/** Sets the content of the search definition test.sd */
public ApplicationPackageBuilder searchDefinition(String testSearchDefinition) {
this.searchDefinition = testSearchDefinition;
return this;
}
/** Add a trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trust(X509Certificate certificate) {
this.trustedCertificates.add(certificate);
return this;
}
/** Add a default trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trustDefaultCertificate() {
try {
var generator = KeyPairGenerator.getInstance("RSA");
var certificate = X509CertificateBuilder.fromKeypair(
generator.generateKeyPair(),
new X500Principal("CN=name"),
Instant.now(),
Instant.now().plusMillis(300_000),
SignatureAlgorithm.SHA256_WITH_RSA,
X509CertificateBuilder.generateRandomSerialNumber()
).build();
return trust(certificate);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
public ApplicationPackageBuilder cloudAccount(String cloudAccount) {
this.cloudAccount = cloudAccount;
return this;
}
private byte[] deploymentSpec() {
StringBuilder xml = new StringBuilder();
xml.append("<deployment version='1.0' ");
majorVersion.ifPresent(v -> xml.append("major-version='").append(v).append("' "));
if (athenzIdentityAttributes != null) {
xml.append(athenzIdentityAttributes);
}
if (cloudAccount != null) {
xml.append(" cloud-account='");
xml.append(cloudAccount);
xml.append("'");
}
xml.append(">\n");
for (String instance : instances.split(",")) {
xml.append(" <instance id='").append(instance).append("'>\n");
if (upgradePolicy != null || revisionTarget != null || revisionChange != null || upgradeRollout != null) {
xml.append(" <upgrade ");
if (upgradePolicy != null) xml.append("policy='").append(upgradePolicy).append("' ");
if (revisionTarget != null) xml.append("revision-target='").append(revisionTarget).append("' ");
if (revisionChange != null) xml.append("revision-change='").append(revisionChange).append("' ");
if (upgradeRollout != null) xml.append("rollout='").append(upgradeRollout).append("' ");
xml.append("/>\n");
}
xml.append(notifications);
if (explicitSystemTest)
xml.append(" <test />\n");
if (explicitStagingTest)
xml.append(" <staging />\n");
xml.append(blockChange);
xml.append(" <prod");
if (globalServiceId != null) {
xml.append(" global-service-id='");
xml.append(globalServiceId);
xml.append("'");
}
xml.append(">\n");
xml.append(prodBody);
xml.append(" </prod>\n");
if (endpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(endpointsBody);
xml.append(" </endpoints>\n");
}
xml.append(" </instance>\n");
}
if (applicationEndpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(applicationEndpointsBody);
xml.append(" </endpoints>\n");
}
xml.append("</deployment>\n");
return xml.toString().getBytes(UTF_8);
}
private byte[] validationOverrides() {
String xml = "<validation-overrides version='1.0'>\n" +
validationOverridesBody +
"</validation-overrides>\n";
return xml.getBytes(UTF_8);
}
private byte[] searchDefinition() {
return searchDefinition.getBytes(UTF_8);
}
private static byte[] buildMeta(Version compileVersion) {
return ("{\"compileVersion\":\"" + compileVersion.toFullString() +
"\",\"buildTime\":1000,\"parentVersion\":\"" +
compileVersion.toFullString() + "\"}").getBytes(UTF_8);
}
private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
ZipEntry entry = new ZipEntry(name);
out.putNextEntry(entry);
out.write(content);
out.closeEntry();
}
private static String asIso8601Date(Instant instant) {
return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant));
}
public static ApplicationPackage fromDeploymentXml(String deploymentXml) {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.putNextEntry(new ZipEntry("deployment.xml"));
out.write(deploymentXml.getBytes(UTF_8));
out.closeEntry();
out.putNextEntry(new ZipEntry("build-meta.json"));
out.write(buildMeta(Version.fromString("6.1")));
out.closeEntry();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
}
} | class ApplicationPackageBuilder {
private final StringBuilder prodBody = new StringBuilder();
private final StringBuilder validationOverridesBody = new StringBuilder();
private final StringBuilder blockChange = new StringBuilder();
private final StringJoiner notifications = new StringJoiner("/>\n <email ",
"<notifications>\n <email ",
"/>\n</notifications>\n").setEmptyValue("");
private final StringBuilder endpointsBody = new StringBuilder();
private final StringBuilder applicationEndpointsBody = new StringBuilder();
private final List<X509Certificate> trustedCertificates = new ArrayList<>();
private OptionalInt majorVersion = OptionalInt.empty();
private String instances = "default";
private String upgradePolicy = null;
private String revisionTarget = "latest";
private String revisionChange = "always";
private String upgradeRollout = null;
private String globalServiceId = null;
private String athenzIdentityAttributes = "athenz-domain='domain' athenz-service='service'";
private String searchDefinition = "search test { }";
private boolean explicitSystemTest = false;
private boolean explicitStagingTest = false;
private Version compileVersion = Version.fromString("6.1");
private String cloudAccount = null;
public ApplicationPackageBuilder majorVersion(int majorVersion) {
this.majorVersion = OptionalInt.of(majorVersion);
return this;
}
public ApplicationPackageBuilder instances(String instances) {
this.instances = instances;
return this;
}
public ApplicationPackageBuilder upgradePolicy(String upgradePolicy) {
this.upgradePolicy = upgradePolicy;
return this;
}
public ApplicationPackageBuilder revisionTarget(String revisionTarget) {
this.revisionTarget = revisionTarget;
return this;
}
public ApplicationPackageBuilder revisionChange(String revisionChange) {
this.revisionChange = revisionChange;
return this;
}
public ApplicationPackageBuilder upgradeRollout(String upgradeRollout) {
this.upgradeRollout = upgradeRollout;
return this;
}
public ApplicationPackageBuilder globalServiceId(String globalServiceId) {
this.globalServiceId = globalServiceId;
return this;
}
public ApplicationPackageBuilder endpoint(String id, String containerId, String... regions) {
endpointsBody.append(" <endpoint");
endpointsBody.append(" id='").append(id).append("'");
endpointsBody.append(" container-id='").append(containerId).append("'");
endpointsBody.append(">\n");
for (var region : regions) {
endpointsBody.append(" <region>").append(region).append("</region>\n");
}
endpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder applicationEndpoint(String id, String containerId, String region,
Map<InstanceName, Integer> instanceWeights) {
if (instanceWeights.isEmpty()) throw new IllegalArgumentException("At least one instance must be given");
applicationEndpointsBody.append(" <endpoint");
applicationEndpointsBody.append(" id='").append(id).append("'");
applicationEndpointsBody.append(" container-id='").append(containerId).append("'");
applicationEndpointsBody.append(" region='").append(region).append("'");
applicationEndpointsBody.append(">\n");
for (var kv : new TreeMap<>(instanceWeights).entrySet()) {
applicationEndpointsBody.append(" <instance weight='").append(kv.getValue().toString()).append("'>")
.append(kv.getKey().value())
.append("</instance>\n");
}
applicationEndpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder systemTest() {
explicitSystemTest = true;
return this;
}
public ApplicationPackageBuilder stagingTest() {
explicitStagingTest = true;
return this;
}
public ApplicationPackageBuilder region(RegionName regionName) {
return region(regionName, true);
}
public ApplicationPackageBuilder region(String regionName) {
prodBody.append(" <region>")
.append(regionName)
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder region(RegionName regionName, boolean active) {
prodBody.append(" <region active='")
.append(active)
.append("'>")
.append(regionName.value())
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder test(String regionName) {
prodBody.append(" <test>");
prodBody.append(regionName);
prodBody.append("</test>\n");
return this;
}
public ApplicationPackageBuilder parallel(String... regionName) {
prodBody.append(" <parallel>\n");
Arrays.stream(regionName).forEach(this::region);
prodBody.append(" </parallel>\n");
return this;
}
public ApplicationPackageBuilder delay(Duration delay) {
prodBody.append(" <delay seconds='");
prodBody.append(delay.getSeconds());
prodBody.append("'/>\n");
return this;
}
public ApplicationPackageBuilder blockChange(boolean revision, boolean version, String daySpec, String hourSpec,
String zoneSpec) {
blockChange.append(" <block-change");
blockChange.append(" revision='").append(revision).append("'");
blockChange.append(" version='").append(version).append("'");
blockChange.append(" days='").append(daySpec).append("'");
blockChange.append(" hours='").append(hourSpec).append("'");
blockChange.append(" time-zone='").append(zoneSpec).append("'");
blockChange.append("/>\n");
return this;
}
public ApplicationPackageBuilder allow(ValidationId validationId) {
validationOverridesBody.append(" <allow until='");
validationOverridesBody.append(asIso8601Date(Instant.now().plus(Duration.ofDays(28))));
validationOverridesBody.append("'>");
validationOverridesBody.append(validationId.value());
validationOverridesBody.append("</allow>\n");
return this;
}
public ApplicationPackageBuilder compileVersion(Version version) {
compileVersion = version;
return this;
}
public ApplicationPackageBuilder athenzIdentity(AthenzDomain domain, AthenzService service) {
this.athenzIdentityAttributes = Text.format("athenz-domain='%s' athenz-service='%s'", domain.value(),
service.value());
return this;
}
public ApplicationPackageBuilder withoutAthenzIdentity() {
this.athenzIdentityAttributes = null;
return this;
}
public ApplicationPackageBuilder emailRole(String role) {
this.notifications.add("role=\"" + role + "\"");
return this;
}
public ApplicationPackageBuilder emailAddress(String address) {
this.notifications.add("address=\"" + address + "\"");
return this;
}
/** Sets the content of the search definition test.sd */
public ApplicationPackageBuilder searchDefinition(String testSearchDefinition) {
this.searchDefinition = testSearchDefinition;
return this;
}
/** Add a trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trust(X509Certificate certificate) {
this.trustedCertificates.add(certificate);
return this;
}
/** Add a default trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trustDefaultCertificate() {
try {
var generator = KeyPairGenerator.getInstance("RSA");
var certificate = X509CertificateBuilder.fromKeypair(
generator.generateKeyPair(),
new X500Principal("CN=name"),
Instant.now(),
Instant.now().plusMillis(300_000),
SignatureAlgorithm.SHA256_WITH_RSA,
X509CertificateBuilder.generateRandomSerialNumber()
).build();
return trust(certificate);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
public ApplicationPackageBuilder cloudAccount(String cloudAccount) {
this.cloudAccount = cloudAccount;
return this;
}
private byte[] deploymentSpec() {
StringBuilder xml = new StringBuilder();
xml.append("<deployment version='1.0' ");
majorVersion.ifPresent(v -> xml.append("major-version='").append(v).append("' "));
if (athenzIdentityAttributes != null) {
xml.append(athenzIdentityAttributes);
}
if (cloudAccount != null) {
xml.append(" cloud-account='");
xml.append(cloudAccount);
xml.append("'");
}
xml.append(">\n");
for (String instance : instances.split(",")) {
xml.append(" <instance id='").append(instance).append("'>\n");
if (upgradePolicy != null || revisionTarget != null || revisionChange != null || upgradeRollout != null) {
xml.append(" <upgrade ");
if (upgradePolicy != null) xml.append("policy='").append(upgradePolicy).append("' ");
if (revisionTarget != null) xml.append("revision-target='").append(revisionTarget).append("' ");
if (revisionChange != null) xml.append("revision-change='").append(revisionChange).append("' ");
if (upgradeRollout != null) xml.append("rollout='").append(upgradeRollout).append("' ");
xml.append("/>\n");
}
xml.append(notifications);
if (explicitSystemTest)
xml.append(" <test />\n");
if (explicitStagingTest)
xml.append(" <staging />\n");
xml.append(blockChange);
xml.append(" <prod");
if (globalServiceId != null) {
xml.append(" global-service-id='");
xml.append(globalServiceId);
xml.append("'");
}
xml.append(">\n");
xml.append(prodBody);
xml.append(" </prod>\n");
if (endpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(endpointsBody);
xml.append(" </endpoints>\n");
}
xml.append(" </instance>\n");
}
if (applicationEndpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(applicationEndpointsBody);
xml.append(" </endpoints>\n");
}
xml.append("</deployment>\n");
return xml.toString().getBytes(UTF_8);
}
private byte[] validationOverrides() {
String xml = "<validation-overrides version='1.0'>\n" +
validationOverridesBody +
"</validation-overrides>\n";
return xml.getBytes(UTF_8);
}
private byte[] searchDefinition() {
return searchDefinition.getBytes(UTF_8);
}
private static byte[] buildMeta(Version compileVersion) {
return ("{\"compileVersion\":\"" + compileVersion.toFullString() +
"\",\"buildTime\":1000,\"parentVersion\":\"" +
compileVersion.toFullString() + "\"}").getBytes(UTF_8);
}
private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
ZipEntry entry = new ZipEntry(name);
out.putNextEntry(entry);
out.write(content);
out.closeEntry();
}
private static String asIso8601Date(Instant instant) {
return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant));
}
public static ApplicationPackage fromDeploymentXml(String deploymentXml) {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.putNextEntry(new ZipEntry("deployment.xml"));
out.write(deploymentXml.getBytes(UTF_8));
out.closeEntry();
out.putNextEntry(new ZipEntry("build-meta.json"));
out.write(buildMeta(Version.fromString("6.1")));
out.closeEntry();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
}
} |
No issue changing this I guess, I didn't look for it. | public ApplicationPackage build() {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.setLevel(Deflater.NO_COMPRESSION);
writeZipEntry(out, "deployment.xml", deploymentSpec());
writeZipEntry(out, "validation-overrides.xml", validationOverrides());
writeZipEntry(out, "search-definitions/test.sd", searchDefinition());
writeZipEntry(out, "build-meta.json", buildMeta(compileVersion));
if (!trustedCertificates.isEmpty()) {
writeZipEntry(out, "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
} | writeZipEntry(out, "search-definitions/test.sd", searchDefinition()); | public ApplicationPackage build() {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.setLevel(Deflater.NO_COMPRESSION);
writeZipEntry(out, "deployment.xml", deploymentSpec());
writeZipEntry(out, "validation-overrides.xml", validationOverrides());
writeZipEntry(out, "search-definitions/test.sd", searchDefinition());
writeZipEntry(out, "build-meta.json", buildMeta(compileVersion));
if (!trustedCertificates.isEmpty()) {
writeZipEntry(out, "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
} | class ApplicationPackageBuilder {
private final StringBuilder prodBody = new StringBuilder();
private final StringBuilder validationOverridesBody = new StringBuilder();
private final StringBuilder blockChange = new StringBuilder();
private final StringJoiner notifications = new StringJoiner("/>\n <email ",
"<notifications>\n <email ",
"/>\n</notifications>\n").setEmptyValue("");
private final StringBuilder endpointsBody = new StringBuilder();
private final StringBuilder applicationEndpointsBody = new StringBuilder();
private final List<X509Certificate> trustedCertificates = new ArrayList<>();
private OptionalInt majorVersion = OptionalInt.empty();
private String instances = "default";
private String upgradePolicy = null;
private String revisionTarget = "latest";
private String revisionChange = "always";
private String upgradeRollout = null;
private String globalServiceId = null;
private String athenzIdentityAttributes = "athenz-domain='domain' athenz-service='service'";
private String searchDefinition = "search test { }";
private boolean explicitSystemTest = false;
private boolean explicitStagingTest = false;
private Version compileVersion = Version.fromString("6.1");
private String cloudAccount = null;
public ApplicationPackageBuilder majorVersion(int majorVersion) {
this.majorVersion = OptionalInt.of(majorVersion);
return this;
}
public ApplicationPackageBuilder instances(String instances) {
this.instances = instances;
return this;
}
public ApplicationPackageBuilder upgradePolicy(String upgradePolicy) {
this.upgradePolicy = upgradePolicy;
return this;
}
public ApplicationPackageBuilder revisionTarget(String revisionTarget) {
this.revisionTarget = revisionTarget;
return this;
}
public ApplicationPackageBuilder revisionChange(String revisionChange) {
this.revisionChange = revisionChange;
return this;
}
public ApplicationPackageBuilder upgradeRollout(String upgradeRollout) {
this.upgradeRollout = upgradeRollout;
return this;
}
public ApplicationPackageBuilder globalServiceId(String globalServiceId) {
this.globalServiceId = globalServiceId;
return this;
}
public ApplicationPackageBuilder endpoint(String id, String containerId, String... regions) {
endpointsBody.append(" <endpoint");
endpointsBody.append(" id='").append(id).append("'");
endpointsBody.append(" container-id='").append(containerId).append("'");
endpointsBody.append(">\n");
for (var region : regions) {
endpointsBody.append(" <region>").append(region).append("</region>\n");
}
endpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder applicationEndpoint(String id, String containerId, String region,
Map<InstanceName, Integer> instanceWeights) {
if (instanceWeights.isEmpty()) throw new IllegalArgumentException("At least one instance must be given");
applicationEndpointsBody.append(" <endpoint");
applicationEndpointsBody.append(" id='").append(id).append("'");
applicationEndpointsBody.append(" container-id='").append(containerId).append("'");
applicationEndpointsBody.append(" region='").append(region).append("'");
applicationEndpointsBody.append(">\n");
for (var kv : new TreeMap<>(instanceWeights).entrySet()) {
applicationEndpointsBody.append(" <instance weight='").append(kv.getValue().toString()).append("'>")
.append(kv.getKey().value())
.append("</instance>\n");
}
applicationEndpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder systemTest() {
explicitSystemTest = true;
return this;
}
public ApplicationPackageBuilder stagingTest() {
explicitStagingTest = true;
return this;
}
public ApplicationPackageBuilder region(RegionName regionName) {
return region(regionName, true);
}
public ApplicationPackageBuilder region(String regionName) {
prodBody.append(" <region>")
.append(regionName)
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder region(RegionName regionName, boolean active) {
prodBody.append(" <region active='")
.append(active)
.append("'>")
.append(regionName.value())
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder test(String regionName) {
prodBody.append(" <test>");
prodBody.append(regionName);
prodBody.append("</test>\n");
return this;
}
public ApplicationPackageBuilder parallel(String... regionName) {
prodBody.append(" <parallel>\n");
Arrays.stream(regionName).forEach(this::region);
prodBody.append(" </parallel>\n");
return this;
}
public ApplicationPackageBuilder delay(Duration delay) {
prodBody.append(" <delay seconds='");
prodBody.append(delay.getSeconds());
prodBody.append("'/>\n");
return this;
}
public ApplicationPackageBuilder blockChange(boolean revision, boolean version, String daySpec, String hourSpec,
String zoneSpec) {
blockChange.append(" <block-change");
blockChange.append(" revision='").append(revision).append("'");
blockChange.append(" version='").append(version).append("'");
blockChange.append(" days='").append(daySpec).append("'");
blockChange.append(" hours='").append(hourSpec).append("'");
blockChange.append(" time-zone='").append(zoneSpec).append("'");
blockChange.append("/>\n");
return this;
}
public ApplicationPackageBuilder allow(ValidationId validationId) {
validationOverridesBody.append(" <allow until='");
validationOverridesBody.append(asIso8601Date(Instant.now().plus(Duration.ofDays(28))));
validationOverridesBody.append("'>");
validationOverridesBody.append(validationId.value());
validationOverridesBody.append("</allow>\n");
return this;
}
public ApplicationPackageBuilder compileVersion(Version version) {
compileVersion = version;
return this;
}
public ApplicationPackageBuilder athenzIdentity(AthenzDomain domain, AthenzService service) {
this.athenzIdentityAttributes = Text.format("athenz-domain='%s' athenz-service='%s'", domain.value(),
service.value());
return this;
}
public ApplicationPackageBuilder withoutAthenzIdentity() {
this.athenzIdentityAttributes = null;
return this;
}
public ApplicationPackageBuilder emailRole(String role) {
this.notifications.add("role=\"" + role + "\"");
return this;
}
/** Adds an email notification entry targeting the given address. */
public ApplicationPackageBuilder emailAddress(String address) {
    notifications.add("address=\"" + address + "\"");
    return this;
}
/** Replaces the content of the search definition test.sd. */
public ApplicationPackageBuilder searchDefinition(String testSearchDefinition) {
    searchDefinition = testSearchDefinition;
    return this;
}
/** Appends a certificate to the trusted set written to security/clients.pem. */
public ApplicationPackageBuilder trust(X509Certificate certificate) {
    trustedCertificates.add(certificate);
    return this;
}
/** Generates a short-lived self-signed RSA certificate and adds it to security/clients.pem. */
public ApplicationPackageBuilder trustDefaultCertificate() {
    try {
        KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
        X509Certificate selfSigned =
                X509CertificateBuilder.fromKeypair(keyGen.generateKeyPair(),
                                                   new X500Principal("CN=name"),
                                                   Instant.now(),
                                                   Instant.now().plusMillis(300_000),
                                                   SignatureAlgorithm.SHA256_WITH_RSA,
                                                   X509CertificateBuilder.generateRandomSerialNumber())
                                     .build();
        return trust(selfSigned);
    } catch (NoSuchAlgorithmException e) {
        // RSA support is mandated by the JCA spec, so this should be unreachable
        throw new RuntimeException(e);
    }
}
/** Sets the cloud account attribute emitted on the deployment root element. */
public ApplicationPackageBuilder cloudAccount(String cloudAccount) {
    this.cloudAccount = cloudAccount;
    return this;
}
/**
 * Renders deployment.xml for this package as UTF-8 bytes, assembled from the
 * accumulated builder state: root attributes, one {@code <instance>} per
 * comma-separated instance name, upgrade settings, notifications, block-change
 * windows, prod steps, and instance- and application-level endpoints.
 */
private byte[] deploymentSpec() {
    StringBuilder xml = new StringBuilder();
    xml.append("<deployment version='1.0' ");
    majorVersion.ifPresent(v -> xml.append("major-version='").append(v).append("' "));
    if (athenzIdentityAttributes != null) {
        xml.append(athenzIdentityAttributes);
    }
    if (cloudAccount != null) {
        xml.append(" cloud-account='");
        xml.append(cloudAccount);
        xml.append("'");
    }
    xml.append(">\n");
    for (String instance : instances.split(",")) {
        xml.append("  <instance id='").append(instance).append("'>\n");
        // Only emit <upgrade> when at least one of its attributes is set
        if (upgradePolicy != null || revisionTarget != null || revisionChange != null || upgradeRollout != null) {
            xml.append("    <upgrade ");
            if (upgradePolicy != null) xml.append("policy='").append(upgradePolicy).append("' ");
            if (revisionTarget != null) xml.append("revision-target='").append(revisionTarget).append("' ");
            if (revisionChange != null) xml.append("revision-change='").append(revisionChange).append("' ");
            if (upgradeRollout != null) xml.append("rollout='").append(upgradeRollout).append("' ");
            xml.append("/>\n");
        }
        xml.append(notifications);
        if (explicitSystemTest)
            xml.append("    <test />\n");
        if (explicitStagingTest)
            xml.append("    <staging />\n");
        xml.append(blockChange);
        xml.append("    <prod");
        if (globalServiceId != null) {
            xml.append(" global-service-id='");
            xml.append(globalServiceId);
            xml.append("'");
        }
        xml.append(">\n");
        xml.append(prodBody);
        xml.append("    </prod>\n");
        // Instance-level endpoints, if any were added via endpoint(...)
        if (endpointsBody.length() > 0) {
            xml.append("    <endpoints>\n");
            xml.append(endpointsBody);
            xml.append("    </endpoints>\n");
        }
        xml.append("  </instance>\n");
    }
    // Application-level endpoints, if any were added via applicationEndpoint(...)
    if (applicationEndpointsBody.length() > 0) {
        xml.append("  <endpoints>\n");
        xml.append(applicationEndpointsBody);
        xml.append("  </endpoints>\n");
    }
    xml.append("</deployment>\n");
    return xml.toString().getBytes(UTF_8);
}
/** Renders validation-overrides.xml as UTF-8 bytes from the accumulated allow entries. */
private byte[] validationOverrides() {
    StringBuilder xml = new StringBuilder("<validation-overrides version='1.0'>\n");
    xml.append(validationOverridesBody);
    xml.append("</validation-overrides>\n");
    return xml.toString().getBytes(UTF_8);
}
/** Returns the test.sd search definition content as UTF-8 bytes. */
private byte[] searchDefinition() {
    return this.searchDefinition.getBytes(UTF_8);
}
/** Returns build-meta.json content for the given compile version, as UTF-8 bytes. */
private static byte[] buildMeta(Version compileVersion) {
    String version = compileVersion.toFullString();
    String json = "{\"compileVersion\":\"" + version +
                  "\",\"buildTime\":1000,\"parentVersion\":\"" +
                  version + "\"}";
    return json.getBytes(UTF_8);
}
/** Writes a single named entry with the given content to the zip stream. */
private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
    out.putNextEntry(new ZipEntry(name));
    out.write(content);
    out.closeEntry();
}
/**
 * Formats the given instant as an ISO-8601 date (yyyy-MM-dd) in the JVM's default time zone.
 *
 * Replaces the legacy SimpleDateFormat, which is not thread-safe and was allocated
 * on every call, with java.time; LocalDate.toString() is ISO_LOCAL_DATE by contract.
 * Fully-qualified names are used so no new imports are required.
 */
private static String asIso8601Date(Instant instant) {
    return instant.atZone(java.time.ZoneId.systemDefault()).toLocalDate().toString();
}
/**
 * Creates a minimal application package zip containing only the given
 * deployment.xml and a build-meta.json pinned to compile version 6.1.
 *
 * @throws UncheckedIOException if writing the in-memory zip fails
 */
public static ApplicationPackage fromDeploymentXml(String deploymentXml) {
    ByteArrayOutputStream zip = new ByteArrayOutputStream();
    try (ZipOutputStream out = new ZipOutputStream(zip)) {
        out.putNextEntry(new ZipEntry("deployment.xml"));
        out.write(deploymentXml.getBytes(UTF_8));
        out.closeEntry();
        out.putNextEntry(new ZipEntry("build-meta.json"));
        out.write(buildMeta(Version.fromString("6.1")));
        out.closeEntry();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    return new ApplicationPackage(zip.toByteArray());
}
} | class ApplicationPackageBuilder {
private final StringBuilder prodBody = new StringBuilder();
private final StringBuilder validationOverridesBody = new StringBuilder();
private final StringBuilder blockChange = new StringBuilder();
private final StringJoiner notifications = new StringJoiner("/>\n <email ",
"<notifications>\n <email ",
"/>\n</notifications>\n").setEmptyValue("");
private final StringBuilder endpointsBody = new StringBuilder();
private final StringBuilder applicationEndpointsBody = new StringBuilder();
private final List<X509Certificate> trustedCertificates = new ArrayList<>();
private OptionalInt majorVersion = OptionalInt.empty();
private String instances = "default";
private String upgradePolicy = null;
private String revisionTarget = "latest";
private String revisionChange = "always";
private String upgradeRollout = null;
private String globalServiceId = null;
private String athenzIdentityAttributes = "athenz-domain='domain' athenz-service='service'";
private String searchDefinition = "search test { }";
private boolean explicitSystemTest = false;
private boolean explicitStagingTest = false;
private Version compileVersion = Version.fromString("6.1");
private String cloudAccount = null;
public ApplicationPackageBuilder majorVersion(int majorVersion) {
this.majorVersion = OptionalInt.of(majorVersion);
return this;
}
public ApplicationPackageBuilder instances(String instances) {
this.instances = instances;
return this;
}
public ApplicationPackageBuilder upgradePolicy(String upgradePolicy) {
this.upgradePolicy = upgradePolicy;
return this;
}
public ApplicationPackageBuilder revisionTarget(String revisionTarget) {
this.revisionTarget = revisionTarget;
return this;
}
public ApplicationPackageBuilder revisionChange(String revisionChange) {
this.revisionChange = revisionChange;
return this;
}
public ApplicationPackageBuilder upgradeRollout(String upgradeRollout) {
this.upgradeRollout = upgradeRollout;
return this;
}
public ApplicationPackageBuilder globalServiceId(String globalServiceId) {
this.globalServiceId = globalServiceId;
return this;
}
public ApplicationPackageBuilder endpoint(String id, String containerId, String... regions) {
endpointsBody.append(" <endpoint");
endpointsBody.append(" id='").append(id).append("'");
endpointsBody.append(" container-id='").append(containerId).append("'");
endpointsBody.append(">\n");
for (var region : regions) {
endpointsBody.append(" <region>").append(region).append("</region>\n");
}
endpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder applicationEndpoint(String id, String containerId, String region,
Map<InstanceName, Integer> instanceWeights) {
if (instanceWeights.isEmpty()) throw new IllegalArgumentException("At least one instance must be given");
applicationEndpointsBody.append(" <endpoint");
applicationEndpointsBody.append(" id='").append(id).append("'");
applicationEndpointsBody.append(" container-id='").append(containerId).append("'");
applicationEndpointsBody.append(" region='").append(region).append("'");
applicationEndpointsBody.append(">\n");
for (var kv : new TreeMap<>(instanceWeights).entrySet()) {
applicationEndpointsBody.append(" <instance weight='").append(kv.getValue().toString()).append("'>")
.append(kv.getKey().value())
.append("</instance>\n");
}
applicationEndpointsBody.append(" </endpoint>\n");
return this;
}
public ApplicationPackageBuilder systemTest() {
explicitSystemTest = true;
return this;
}
public ApplicationPackageBuilder stagingTest() {
explicitStagingTest = true;
return this;
}
public ApplicationPackageBuilder region(RegionName regionName) {
return region(regionName, true);
}
public ApplicationPackageBuilder region(String regionName) {
prodBody.append(" <region>")
.append(regionName)
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder region(RegionName regionName, boolean active) {
prodBody.append(" <region active='")
.append(active)
.append("'>")
.append(regionName.value())
.append("</region>\n");
return this;
}
public ApplicationPackageBuilder test(String regionName) {
prodBody.append(" <test>");
prodBody.append(regionName);
prodBody.append("</test>\n");
return this;
}
public ApplicationPackageBuilder parallel(String... regionName) {
prodBody.append(" <parallel>\n");
Arrays.stream(regionName).forEach(this::region);
prodBody.append(" </parallel>\n");
return this;
}
public ApplicationPackageBuilder delay(Duration delay) {
prodBody.append(" <delay seconds='");
prodBody.append(delay.getSeconds());
prodBody.append("'/>\n");
return this;
}
public ApplicationPackageBuilder blockChange(boolean revision, boolean version, String daySpec, String hourSpec,
String zoneSpec) {
blockChange.append(" <block-change");
blockChange.append(" revision='").append(revision).append("'");
blockChange.append(" version='").append(version).append("'");
blockChange.append(" days='").append(daySpec).append("'");
blockChange.append(" hours='").append(hourSpec).append("'");
blockChange.append(" time-zone='").append(zoneSpec).append("'");
blockChange.append("/>\n");
return this;
}
public ApplicationPackageBuilder allow(ValidationId validationId) {
validationOverridesBody.append(" <allow until='");
validationOverridesBody.append(asIso8601Date(Instant.now().plus(Duration.ofDays(28))));
validationOverridesBody.append("'>");
validationOverridesBody.append(validationId.value());
validationOverridesBody.append("</allow>\n");
return this;
}
public ApplicationPackageBuilder compileVersion(Version version) {
compileVersion = version;
return this;
}
public ApplicationPackageBuilder athenzIdentity(AthenzDomain domain, AthenzService service) {
this.athenzIdentityAttributes = Text.format("athenz-domain='%s' athenz-service='%s'", domain.value(),
service.value());
return this;
}
public ApplicationPackageBuilder withoutAthenzIdentity() {
this.athenzIdentityAttributes = null;
return this;
}
public ApplicationPackageBuilder emailRole(String role) {
this.notifications.add("role=\"" + role + "\"");
return this;
}
public ApplicationPackageBuilder emailAddress(String address) {
this.notifications.add("address=\"" + address + "\"");
return this;
}
/** Sets the content of the search definition test.sd */
public ApplicationPackageBuilder searchDefinition(String testSearchDefinition) {
this.searchDefinition = testSearchDefinition;
return this;
}
/** Add a trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trust(X509Certificate certificate) {
this.trustedCertificates.add(certificate);
return this;
}
/** Add a default trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trustDefaultCertificate() {
try {
var generator = KeyPairGenerator.getInstance("RSA");
var certificate = X509CertificateBuilder.fromKeypair(
generator.generateKeyPair(),
new X500Principal("CN=name"),
Instant.now(),
Instant.now().plusMillis(300_000),
SignatureAlgorithm.SHA256_WITH_RSA,
X509CertificateBuilder.generateRandomSerialNumber()
).build();
return trust(certificate);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
public ApplicationPackageBuilder cloudAccount(String cloudAccount) {
this.cloudAccount = cloudAccount;
return this;
}
private byte[] deploymentSpec() {
StringBuilder xml = new StringBuilder();
xml.append("<deployment version='1.0' ");
majorVersion.ifPresent(v -> xml.append("major-version='").append(v).append("' "));
if (athenzIdentityAttributes != null) {
xml.append(athenzIdentityAttributes);
}
if (cloudAccount != null) {
xml.append(" cloud-account='");
xml.append(cloudAccount);
xml.append("'");
}
xml.append(">\n");
for (String instance : instances.split(",")) {
xml.append(" <instance id='").append(instance).append("'>\n");
if (upgradePolicy != null || revisionTarget != null || revisionChange != null || upgradeRollout != null) {
xml.append(" <upgrade ");
if (upgradePolicy != null) xml.append("policy='").append(upgradePolicy).append("' ");
if (revisionTarget != null) xml.append("revision-target='").append(revisionTarget).append("' ");
if (revisionChange != null) xml.append("revision-change='").append(revisionChange).append("' ");
if (upgradeRollout != null) xml.append("rollout='").append(upgradeRollout).append("' ");
xml.append("/>\n");
}
xml.append(notifications);
if (explicitSystemTest)
xml.append(" <test />\n");
if (explicitStagingTest)
xml.append(" <staging />\n");
xml.append(blockChange);
xml.append(" <prod");
if (globalServiceId != null) {
xml.append(" global-service-id='");
xml.append(globalServiceId);
xml.append("'");
}
xml.append(">\n");
xml.append(prodBody);
xml.append(" </prod>\n");
if (endpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(endpointsBody);
xml.append(" </endpoints>\n");
}
xml.append(" </instance>\n");
}
if (applicationEndpointsBody.length() > 0) {
xml.append(" <endpoints>\n");
xml.append(applicationEndpointsBody);
xml.append(" </endpoints>\n");
}
xml.append("</deployment>\n");
return xml.toString().getBytes(UTF_8);
}
private byte[] validationOverrides() {
String xml = "<validation-overrides version='1.0'>\n" +
validationOverridesBody +
"</validation-overrides>\n";
return xml.getBytes(UTF_8);
}
private byte[] searchDefinition() {
return searchDefinition.getBytes(UTF_8);
}
private static byte[] buildMeta(Version compileVersion) {
return ("{\"compileVersion\":\"" + compileVersion.toFullString() +
"\",\"buildTime\":1000,\"parentVersion\":\"" +
compileVersion.toFullString() + "\"}").getBytes(UTF_8);
}
private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
ZipEntry entry = new ZipEntry(name);
out.putNextEntry(entry);
out.write(content);
out.closeEntry();
}
private static String asIso8601Date(Instant instant) {
return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant));
}
public static ApplicationPackage fromDeploymentXml(String deploymentXml) {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.putNextEntry(new ZipEntry("deployment.xml"));
out.write(deploymentXml.getBytes(UTF_8));
out.closeEntry();
out.putNextEntry(new ZipEntry("build-meta.json"));
out.write(buildMeta(Version.fromString("6.1")));
out.closeEntry();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
}
} |
Re: #22967 consider removing this | private void handleCreateVisitorReply(CreateVisitorReply reply) {
CreateVisitorMessage msg = (CreateVisitorMessage)reply.getMessage();
BucketId superbucket = msg.getBuckets().get(0);
BucketId subBucketProgress = reply.getLastBucket();
log.log(Level.FINE, () -> sessionName + ": received CreateVisitorReply for bucket " +
superbucket + " with progress " + subBucketProgress);
progress.getIterator().update(superbucket, subBucketProgress);
params.getControlHandler().onProgress(progress.getToken());
statistics.add(reply.getVisitorStatistics());
params.getControlHandler().onVisitorStatistics(statistics);
if ( ! reply.getTrace().getRoot().isEmpty() && (trace.getRoot().getNumChildren() < 1000)) {
trace.getRoot().addChild(reply.getTrace().getRoot());
}
if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) {
int newMaxBuckets = Math.max(Math.min((int)(params.getMaxBucketsPerVisitor()
* params.getDynamicMaxBucketsIncreaseFactor()), 128), 1);
params.setMaxBucketsPerVisitor(newMaxBuckets);
log.log(Level.FINE, () -> sessionName + ": increasing max buckets per visitor to "
+ params.getMaxBucketsPerVisitor());
}
} | if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) { | private void handleCreateVisitorReply(CreateVisitorReply reply) {
CreateVisitorMessage msg = (CreateVisitorMessage)reply.getMessage();
BucketId superbucket = msg.getBuckets().get(0);
BucketId subBucketProgress = reply.getLastBucket();
log.log(Level.FINE, () -> sessionName + ": received CreateVisitorReply for bucket " +
superbucket + " with progress " + subBucketProgress);
progress.getIterator().update(superbucket, subBucketProgress);
params.getControlHandler().onProgress(progress.getToken());
statistics.add(reply.getVisitorStatistics());
params.getControlHandler().onVisitorStatistics(statistics);
if ( ! reply.getTrace().getRoot().isEmpty() && (trace.getRoot().getNumChildren() < 1000)) {
trace.getRoot().addChild(reply.getTrace().getRoot());
}
if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) {
int newMaxBuckets = Math.max(Math.min((int)(params.getMaxBucketsPerVisitor()
* params.getDynamicMaxBucketsIncreaseFactor()), 128), 1);
params.setMaxBucketsPerVisitor(newMaxBuckets);
log.log(Level.FINE, () -> sessionName + ": increasing max buckets per visitor to "
+ params.getMaxBucketsPerVisitor());
}
} | class HandleMessageTask implements Runnable {
private final Message message;
private HandleMessageTask(Message message) {
this.message = message;
}
@Override
public void run() {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Visitor session " + sessionName + ": Received message " + message);
}
try {
if (message instanceof VisitorInfoMessage) {
handleVisitorInfoMessage((VisitorInfoMessage)message);
} else {
handleDocumentMessage((DocumentMessage)message);
}
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Caught unhandled error when processing message", t);
}
}
} | class HandleMessageTask implements Runnable {
private final Message message;
private HandleMessageTask(Message message) {
this.message = message;
}
@Override
public void run() {
    if (log.isLoggable(Level.FINE)) {
        log.log(Level.FINE, "Visitor session " + sessionName + ": Received message " + message);
    }
    // Dispatch by message kind; any unexpected error terminates the process
    // rather than silently killing the executor thread.
    try {
        if (message instanceof VisitorInfoMessage) {
            handleVisitorInfoMessage((VisitorInfoMessage)message);
        } else {
            handleDocumentMessage((DocumentMessage)message);
        }
    } catch (Throwable t) {
        com.yahoo.protect.Process.logAndDie("Caught unhandled error when processing message", t);
    }
}
} |
yes, next pr :-) | private void handleCreateVisitorReply(CreateVisitorReply reply) {
CreateVisitorMessage msg = (CreateVisitorMessage)reply.getMessage();
BucketId superbucket = msg.getBuckets().get(0);
BucketId subBucketProgress = reply.getLastBucket();
log.log(Level.FINE, () -> sessionName + ": received CreateVisitorReply for bucket " +
superbucket + " with progress " + subBucketProgress);
progress.getIterator().update(superbucket, subBucketProgress);
params.getControlHandler().onProgress(progress.getToken());
statistics.add(reply.getVisitorStatistics());
params.getControlHandler().onVisitorStatistics(statistics);
if ( ! reply.getTrace().getRoot().isEmpty() && (trace.getRoot().getNumChildren() < 1000)) {
trace.getRoot().addChild(reply.getTrace().getRoot());
}
if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) {
int newMaxBuckets = Math.max(Math.min((int)(params.getMaxBucketsPerVisitor()
* params.getDynamicMaxBucketsIncreaseFactor()), 128), 1);
params.setMaxBucketsPerVisitor(newMaxBuckets);
log.log(Level.FINE, () -> sessionName + ": increasing max buckets per visitor to "
+ params.getMaxBucketsPerVisitor());
}
} | if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) { | private void handleCreateVisitorReply(CreateVisitorReply reply) {
CreateVisitorMessage msg = (CreateVisitorMessage)reply.getMessage();
BucketId superbucket = msg.getBuckets().get(0);
BucketId subBucketProgress = reply.getLastBucket();
log.log(Level.FINE, () -> sessionName + ": received CreateVisitorReply for bucket " +
superbucket + " with progress " + subBucketProgress);
progress.getIterator().update(superbucket, subBucketProgress);
params.getControlHandler().onProgress(progress.getToken());
statistics.add(reply.getVisitorStatistics());
params.getControlHandler().onVisitorStatistics(statistics);
if ( ! reply.getTrace().getRoot().isEmpty() && (trace.getRoot().getNumChildren() < 1000)) {
trace.getRoot().addChild(reply.getTrace().getRoot());
}
if (params.getDynamicallyIncreaseMaxBucketsPerVisitor()) {
int newMaxBuckets = Math.max(Math.min((int)(params.getMaxBucketsPerVisitor()
* params.getDynamicMaxBucketsIncreaseFactor()), 128), 1);
params.setMaxBucketsPerVisitor(newMaxBuckets);
log.log(Level.FINE, () -> sessionName + ": increasing max buckets per visitor to "
+ params.getMaxBucketsPerVisitor());
}
} | class HandleMessageTask implements Runnable {
private final Message message;
private HandleMessageTask(Message message) {
this.message = message;
}
@Override
public void run() {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Visitor session " + sessionName + ": Received message " + message);
}
try {
if (message instanceof VisitorInfoMessage) {
handleVisitorInfoMessage((VisitorInfoMessage)message);
} else {
handleDocumentMessage((DocumentMessage)message);
}
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Caught unhandled error when processing message", t);
}
}
} | class HandleMessageTask implements Runnable {
private final Message message;
private HandleMessageTask(Message message) {
this.message = message;
}
@Override
public void run() {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Visitor session " + sessionName + ": Received message " + message);
}
try {
if (message instanceof VisitorInfoMessage) {
handleVisitorInfoMessage((VisitorInfoMessage)message);
} else {
handleDocumentMessage((DocumentMessage)message);
}
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Caught unhandled error when processing message", t);
}
}
} |
materializedView may got null better to check this | public void dropPartition(Database db, Table table, DropPartitionClause clause) throws DdlException {
OlapTable olapTable = (OlapTable) table;
Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
String partitionName = clause.getPartitionName();
boolean isTempPartition = clause.isTempPartition();
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
throw new DdlException("Table[" + olapTable.getName() + "]'s state is not NORMAL");
}
if (!olapTable.checkPartitionNameExist(partitionName, isTempPartition)) {
if (clause.isSetIfExists()) {
LOG.info("drop partition[{}] which does not exist", partitionName);
return;
} else {
ErrorReport.reportDdlException(ErrorCode.ERR_DROP_PARTITION_NON_EXISTENT, partitionName);
}
}
PartitionInfo partitionInfo = olapTable.getPartitionInfo();
if (partitionInfo.getType() != PartitionType.RANGE) {
throw new DdlException("Alter table [" + olapTable.getName() + "] failed. Not a partitioned table");
}
Set<Long> tabletIdSet = new HashSet<Long>();
if (isTempPartition) {
olapTable.dropTempPartition(partitionName, true);
} else {
if (!clause.isForceDrop()) {
Partition partition = olapTable.getPartition(partitionName);
if (partition != null) {
if (stateMgr.getGlobalTransactionMgr()
.existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) {
throw new DdlException(
"There are still some transactions in the COMMITTED state waiting to be completed." +
" The partition [" + partitionName +
"] cannot be dropped. If you want to forcibly drop(cannot be recovered)," +
" please use \"DROP partition FORCE\".");
}
}
}
tabletIdSet = olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop());
try {
for (Long mvId : olapTable.getRelatedMaterializedViews()) {
MaterializedView materializedView = (MaterializedView) db.getTable(mvId);
if (materializedView.isLoadTriggeredRefresh()) {
GlobalStateMgr.getCurrentState().getLocalMetastore().refreshMaterializedView(
db.getFullName(), materializedView.getName(), Constants.TaskRunPriority.NORMAL.value());
}
}
} catch (MetaNotFoundException e) {
throw new DdlException(e.getMessage());
}
}
DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionName, isTempPartition,
clause.isForceDrop());
editLog.logDropPartition(info);
if (!tabletIdSet.isEmpty()) {
stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
}
LOG.info("succeed in droping partition[{}], is temp : {}, is force : {}", partitionName, isTempPartition,
clause.isForceDrop());
} | MaterializedView materializedView = (MaterializedView) db.getTable(mvId); | public void dropPartition(Database db, Table table, DropPartitionClause clause) throws DdlException {
OlapTable olapTable = (OlapTable) table;
Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
String partitionName = clause.getPartitionName();
boolean isTempPartition = clause.isTempPartition();
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
throw new DdlException("Table[" + olapTable.getName() + "]'s state is not NORMAL");
}
if (!olapTable.checkPartitionNameExist(partitionName, isTempPartition)) {
if (clause.isSetIfExists()) {
LOG.info("drop partition[{}] which does not exist", partitionName);
return;
} else {
ErrorReport.reportDdlException(ErrorCode.ERR_DROP_PARTITION_NON_EXISTENT, partitionName);
}
}
PartitionInfo partitionInfo = olapTable.getPartitionInfo();
if (partitionInfo.getType() != PartitionType.RANGE) {
throw new DdlException("Alter table [" + olapTable.getName() + "] failed. Not a partitioned table");
}
Set<Long> tabletIdSet = new HashSet<Long>();
if (isTempPartition) {
olapTable.dropTempPartition(partitionName, true);
} else {
if (!clause.isForceDrop()) {
Partition partition = olapTable.getPartition(partitionName);
if (partition != null) {
if (stateMgr.getGlobalTransactionMgr()
.existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) {
throw new DdlException(
"There are still some transactions in the COMMITTED state waiting to be completed." +
" The partition [" + partitionName +
"] cannot be dropped. If you want to forcibly drop(cannot be recovered)," +
" please use \"DROP partition FORCE\".");
}
}
}
tabletIdSet = olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop());
try {
for (Long mvId : olapTable.getRelatedMaterializedViews()) {
MaterializedView materializedView = (MaterializedView) db.getTable(mvId);
if (materializedView != null && materializedView.isLoadTriggeredRefresh()) {
GlobalStateMgr.getCurrentState().getLocalMetastore().refreshMaterializedView(
db.getFullName(), materializedView.getName(), Constants.TaskRunPriority.NORMAL.value());
}
}
} catch (MetaNotFoundException e) {
throw new DdlException("fail to refresh materialized views when dropping partition", e);
}
}
DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionName, isTempPartition,
clause.isForceDrop());
editLog.logDropPartition(info);
if (!tabletIdSet.isEmpty()) {
stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
}
LOG.info("succeed in droping partition[{}], is temp : {}, is force : {}", partitionName, isTempPartition,
clause.isForceDrop());
} | class LocalMetastore implements ConnectorMetadata {
private static final Logger LOG = LogManager.getLogger(LocalMetastore.class);
private final ConcurrentHashMap<Long, Database> idToDb = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Database> fullNameToDb = new ConcurrentHashMap<>();
private Cluster defaultCluster;
private final GlobalStateMgr stateMgr;
private EditLog editLog;
private final CatalogRecycleBin recycleBin;
private ColocateTableIndex colocateTableIndex;
private final SystemInfoService systemInfoService;
/** Creates the local metastore, wiring in global state and its supporting services. */
public LocalMetastore(GlobalStateMgr globalStateMgr, CatalogRecycleBin recycleBin,
                      ColocateTableIndex colocateTableIndex, SystemInfoService systemInfoService) {
    this.stateMgr = globalStateMgr;
    this.recycleBin = recycleBin;
    this.colocateTableIndex = colocateTableIndex;
    this.systemInfoService = systemInfoService;
}
// Delegates to the global state manager lock; mustLock controls blocking behavior
// (semantics defined by GlobalStateMgr.tryLock).
private boolean tryLock(boolean mustLock) {
    return stateMgr.tryLock(mustLock);
}
// Releases the global state manager lock acquired via tryLock().
private void unlock() {
    stateMgr.unlock();
}
// Allocates the next globally unique id from the global state manager.
private long getNextId() {
    return stateMgr.getNextId();
}
// Injected after construction; metadata mutations in this class are journaled through it.
public void setEditLog(EditLog editLog) {
    this.editLog = editLog;
}
/**
 * Rebuilds the global tablet inverted index (tablet id -> tablet meta and replicas)
 * by walking every partition/index/tablet of every native table in every database.
 * No-op on checkpoint threads.
 */
public void recreateTabletInvertIndex() {
    if (isCheckpointThread()) {
        return;
    }
    TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
    for (Database db : this.fullNameToDb.values()) {
        long dbId = db.getId();
        for (Table table : db.getTables()) {
            // Only native (OLAP/lake) tables have tablets to index
            if (!table.isNativeTable()) {
                continue;
            }
            OlapTable olapTable = (OlapTable) table;
            long tableId = olapTable.getId();
            Collection<Partition> allPartitions = olapTable.getAllPartitions();
            for (Partition partition : allPartitions) {
                long partitionId = partition.getId();
                TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
                        partitionId).getStorageMedium();
                for (MaterializedIndex index : partition
                        .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                    long indexId = index.getId();
                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                    TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, schemaHash, medium,
                            table.isLakeTable());
                    for (Tablet tablet : index.getTablets()) {
                        long tabletId = tablet.getId();
                        invertedIndex.addTablet(tabletId, tabletMeta);
                        // Lake tables have no local replicas to register
                        if (table.isLocalTable()) {
                            for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                                invertedIndex.addReplica(tabletId, replica);
                                // NOTE(review): for pre-VERSION_48 images the replica schema hash is
                                // backfilled from the index — presumably absent in the old format; verify.
                                if (MetaContext.get().getMetaVersion() < FeMetaVersion.VERSION_48) {
                                    replica.setSchemaHash(schemaHash);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
/**
 * Replays all databases from an image stream, registering each in the in-memory
 * maps and the transaction manager. Returns the running checksum folded with the
 * db count and each db id, for image integrity verification.
 */
public long loadDb(DataInputStream dis, long checksum) throws IOException {
    int dbCount = dis.readInt();
    long newChecksum = checksum ^ dbCount;
    for (long i = 0; i < dbCount; ++i) {
        Database db = new Database();
        db.readFields(dis);
        newChecksum ^= db.getId();
        idToDb.put(db.getId(), db);
        fullNameToDb.put(db.getFullName(), db);
        stateMgr.getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId());
        // Materialized views re-run their onCreate hooks after deserialization
        db.getMaterializedViews().stream().forEach(Table::onCreate);
    }
    LOG.info("finished replay databases from image");
    return newChecksum;
}
/**
 * Writes all user databases to an image stream under each db's read lock,
 * skipping the information schema db (hence size() - 1), and folds the count
 * and each persisted db id into the checksum.
 */
public long saveDb(DataOutputStream dos, long checksum) throws IOException {
    // Excludes the single info-schema db, which is rebuilt rather than persisted
    int dbCount = idToDb.size() - 1;
    checksum ^= dbCount;
    dos.writeInt(dbCount);
    for (Map.Entry<Long, Database> entry : idToDb.entrySet()) {
        Database db = entry.getValue();
        String dbName = db.getFullName();
        if (!InfoSchemaDb.isInfoSchemaDb(dbName)) {
            checksum ^= entry.getKey();
            db.readLock();
            try {
                db.write(dos);
            } finally {
                db.readUnlock();
            }
        }
    }
    return checksum;
}
/**
 * Creates a new empty database with the given name, journaling the operation.
 *
 * @throws DdlException           if the global lock cannot be acquired
 * @throws AlreadyExistsException if a database with this name already exists
 */
@Override
public void createDb(String dbName) throws DdlException, AlreadyExistsException {
    long id = 0L;
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (fullNameToDb.containsKey(dbName)) {
            throw new AlreadyExistsException("Database Already Exists");
        } else {
            id = getNextId();
            Database db = new Database(id, dbName);
            unprotectCreateDb(db);
            // Journal after in-memory state is updated so replay reproduces the same state
            editLog.logCreateDb(db);
        }
    } finally {
        unlock();
    }
    LOG.info("createDb dbName = " + dbName + ", id = " + id);
}
/**
 * Registers a database in all in-memory structures without taking any locks;
 * callers in this file invoke it while holding the global lock.
 */
public void unprotectCreateDb(Database db) {
    idToDb.put(db.getId(), db);
    fullNameToDb.put(db.getFullName(), db);
    final Cluster cluster = defaultCluster;
    cluster.addDb(db.getFullName(), db.getId());
    stateMgr.getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId());
}
/** Returns the live id -> database map (not a defensive copy). */
public ConcurrentHashMap<Long, Database> getIdToDb() {
    return idToDb;
}
/** Edit-log replay counterpart of createDb: registers the db without re-journaling. */
public void replayCreateDb(Database db) {
    tryLock(true);
    try {
        unprotectCreateDb(db);
        LOG.info("finish replay create db, name: {}, id: {}", db.getOriginName(), db.getId());
    } finally {
        unlock();
    }
}
/**
 * Drops a database and all its tables, journaling the operation. Unless forced,
 * refuses when committed-but-incomplete transactions exist, and moves the db to
 * the recycle bin instead of erasing it. Deferred table-cleanup runnables are
 * executed only after both the db write lock and the global lock are released.
 *
 * @throws DdlException          if the lock cannot be acquired or pending txns block the drop
 * @throws MetaNotFoundException if no database with this name exists
 */
@Override
public void dropDb(String dbName, boolean isForceDrop) throws DdlException, MetaNotFoundException {
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    List<Runnable> runnableList;
    try {
        if (!fullNameToDb.containsKey(dbName)) {
            throw new MetaNotFoundException("Database not found");
        }
        Database db = this.fullNameToDb.get(dbName);
        db.writeLock();
        try {
            if (!isForceDrop && stateMgr.getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) {
                throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " +
                        "The database [" + dbName +
                        "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," +
                        " please use \"DROP database FORCE\".");
            }
            Set<String> tableNames = db.getTableNamesWithLock();
            runnableList = unprotectDropDb(db, isForceDrop, false);
            if (!isForceDrop) {
                // Soft drop: keep the db recoverable via the recycle bin
                recycleBin.recycleDatabase(db, tableNames);
            } else {
                stateMgr.onEraseDatabase(db.getId());
            }
        } finally {
            db.writeUnlock();
        }
        idToDb.remove(db.getId());
        fullNameToDb.remove(db.getFullName());
        final Cluster cluster = defaultCluster;
        cluster.removeDb(dbName, db.getId());
        DropDbInfo info = new DropDbInfo(db.getFullName(), isForceDrop);
        editLog.logDropDb(info);
        LOG.info("finish drop database[{}], id: {}, is force : {}", dbName, db.getId(), isForceDrop);
    } finally {
        unlock();
    }
    // Run deferred per-table cleanup outside all locks
    for (Runnable runnable : runnableList) {
        runnable.run();
    }
}
@NotNull
/**
 * Drops every table of the given database without taking locks (the caller
 * must already hold the required locks) and collects the deferred cleanup
 * runnables produced by each table drop.
 *
 * @param db         database whose tables are dropped
 * @param isForeDrop whether this is a force drop
 * @param isReplay   whether this runs on the edit-log replay path
 * @return the non-null cleanup runnables, to be executed after locks are released
 */
public List<Runnable> unprotectDropDb(Database db, boolean isForeDrop, boolean isReplay) {
    List<Runnable> deferredCleanups = new ArrayList<>();
    for (Table droppedTable : db.getTables()) {
        Runnable cleanup = db.unprotectDropTable(droppedTable.getId(), isForeDrop, isReplay);
        if (cleanup == null) {
            continue;
        }
        deferredCleanups.add(cleanup);
    }
    return deferredCleanups;
}
/**
 * Replays a drop-database edit-log entry: mirrors {@link #dropDb} minus the
 * transaction check and edit-log write, since the leader already validated and
 * logged the operation.
 *
 * @param dbName      full name of the dropped database
 * @param isForceDrop whether the original drop was forced
 * @throws DdlException propagated from table cleanup
 */
public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException {
    List<Runnable> runnableList;
    // Blocking acquire: replay must proceed.
    tryLock(true);
    try {
        Database db = fullNameToDb.get(dbName);
        db.writeLock();
        try {
            Set<String> tableNames = db.getTableNamesWithLock();
            runnableList = unprotectDropDb(db, isForceDrop, true);
            if (!isForceDrop) {
                recycleBin.recycleDatabase(db, tableNames);
            } else {
                stateMgr.onEraseDatabase(db.getId());
            }
        } finally {
            db.writeUnlock();
        }
        fullNameToDb.remove(dbName);
        idToDb.remove(db.getId());
        final Cluster cluster = defaultCluster;
        cluster.removeDb(dbName, db.getId());
        LOG.info("finish replay drop db, name: {}, id: {}", dbName, db.getId());
    } finally {
        unlock();
    }
    // Deferred cleanup runs outside all locks.
    for (Runnable runnable : runnableList) {
        runnable.run();
    }
}
/**
 * Recovers a previously (non-force) dropped database from the recycle bin,
 * re-registers it in the catalog maps, and logs the recovery.
 *
 * @param recoverStmt statement naming the database to recover
 * @throws DdlException if a live database with the same name exists or the
 *                      global lock cannot be acquired
 */
public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException {
    // Fast pre-check outside the lock; re-validated under the lock below.
    if (getDb(recoverStmt.getDbName()) != null) {
        throw new DdlException("Database[" + recoverStmt.getDbName() + "] already exist.");
    }
    Database db = recycleBin.recoverDatabase(recoverStmt.getDbName());
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        // Re-check under the lock: another session may have raced us.
        if (fullNameToDb.containsKey(db.getFullName())) {
            throw new DdlException("Database[" + db.getOriginName() + "] already exist.");
        }
        fullNameToDb.put(db.getFullName(), db);
        idToDb.put(db.getId(), db);
        final Cluster cluster = defaultCluster;
        cluster.addDb(db.getFullName(), db.getId());
        // -1 table/partition ids mark this as a whole-database recovery.
        RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L);
        editLog.logRecoverDb(recoverInfo);
    } finally {
        unlock();
    }
    LOG.info("finish recover database, name: {}, id: {}", recoverStmt.getDbName(), db.getId());
}
/**
 * Recovers a previously dropped table from the recycle bin into its database.
 *
 * @param recoverStmt statement naming the database and table to recover
 * @throws DdlException if the database is missing, a live table with the same
 *                      name exists, or the table is not in the recycle bin
 */
public void recoverTable(RecoverTableStmt recoverStmt) throws DdlException {
    String dbName = recoverStmt.getDbName();
    // Plain lookup-then-check instead of the assignment-inside-condition
    // idiom; reportDdlException always throws, so db is non-null afterwards.
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    String tableName = recoverStmt.getTableName();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table != null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
        }
        if (!recycleBin.recoverTable(db, tableName)) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Recovers a previously dropped partition of an OLAP/lake table from the
 * recycle bin.
 *
 * @param recoverStmt statement naming database, table, and partition
 * @throws DdlException if the database/table is missing, the table is not an
 *                      OLAP or LAKE table, or the partition already exists
 */
public void recoverPartition(RecoverPartitionStmt recoverStmt) throws DdlException {
    String dbName = recoverStmt.getDbName();
    // Plain lookup-then-check instead of the assignment-inside-condition
    // idiom; reportDdlException always throws, so db is non-null afterwards.
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    String tableName = recoverStmt.getTableName();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
        if (!table.isOlapOrLakeTable()) {
            throw new DdlException("table[" + tableName + "] is not OLAP table or LAKE table");
        }
        OlapTable olapTable = (OlapTable) table;
        String partitionName = recoverStmt.getPartitionName();
        if (olapTable.getPartition(partitionName) != null) {
            throw new DdlException("partition[" + partitionName + "] already exist in table[" + tableName + "]");
        }
        recycleBin.recoverPartition(db.getId(), olapTable, partitionName);
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays the permanent erasure of a database from the recycle bin.
 *
 * @param dbId id of the database to erase
 */
public void replayEraseDatabase(long dbId) {
    recycleBin.replayEraseDatabase(dbId);
}
/**
 * Replays a recover-database edit-log entry: pulls the database back out of
 * the recycle bin and re-registers it via the create-db replay path.
 *
 * @param info edit-log record identifying the database to recover
 */
public void replayRecoverDatabase(RecoverInfo info) {
    long recoveredDbId = info.getDbId();
    Database recovered = recycleBin.replayRecoverDatabase(recoveredDbId);
    replayCreateDb(recovered);
    LOG.info("replay recover db[{}], name: {}", recoveredDbId, recovered.getOriginName());
}
/**
 * Applies a data- or replica-quota change to a database and logs it.
 *
 * @param stmt statement carrying the database name, quota type, and new quota
 * @throws DdlException if the database does not exist
 */
public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    AlterDatabaseQuotaStmt.QuotaType quotaType = stmt.getQuotaType();
    if (quotaType == AlterDatabaseQuotaStmt.QuotaType.DATA) {
        db.setDataQuotaWithLock(stmt.getQuota());
    } else if (quotaType == AlterDatabaseQuotaStmt.QuotaType.REPLICA) {
        db.setReplicaQuotaWithLock(stmt.getQuota());
    }
    long quota = stmt.getQuota();
    // Empty new-name field: this DatabaseInfo records a quota change, not a rename.
    DatabaseInfo dbInfo = new DatabaseInfo(db.getFullName(), "", quota, quotaType);
    editLog.logAlterDb(dbInfo);
}
/**
 * Replays a database quota change from the edit log.
 *
 * @param dbName    name of the database whose quota changed
 * @param quota     the new quota value
 * @param quotaType whether the data or the replica quota is being set
 */
public void replayAlterDatabaseQuota(String dbName, long quota, AlterDatabaseQuotaStmt.QuotaType quotaType) {
    Database database = getDb(dbName);
    Preconditions.checkNotNull(database);
    switch (quotaType) {
        case DATA:
            database.setDataQuotaWithLock(quota);
            break;
        case REPLICA:
            database.setReplicaQuotaWithLock(quota);
            break;
        default:
            // Any other quota type is ignored, matching the original if/else chain.
            break;
    }
}
public void renameDatabase(AlterDatabaseRename stmt) throws DdlException {
String fullDbName = stmt.getDbName();
String newFullDbName = stmt.getNewDbName();
if (fullDbName.equals(newFullDbName)) {
throw new DdlException("Same database name");
}
Database db;
Cluster cluster;
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
cluster = defaultCluster;
if (cluster == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, SystemInfoService.DEFAULT_CLUSTER);
}
db = fullNameToDb.get(fullDbName);
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, fullDbName);
}
if (fullNameToDb.get(newFullDbName) != null) {
throw new DdlException("Database name[" + newFullDbName + "] is already used");
}
cluster.removeDb(db.getFullName(), db.getId());
cluster.addDb(newFullDbName, db.getId());
db.setNameWithLock(newFullDbName);
fullNameToDb.remove(fullDbName);
fullNameToDb.put(newFullDbName, db);
DatabaseInfo dbInfo =
new DatabaseInfo(fullDbName, newFullDbName, -1L, AlterDatabaseQuotaStmt.QuotaType.NONE);
editLog.logDatabaseRename(dbInfo);
} finally {
unlock();
}
LOG.info("rename database[{}] to [{}], id: {}", fullDbName, newFullDbName, db.getId());
}
/**
 * Replays a database rename from the edit log; mirrors {@link #renameDatabase}
 * without validation or re-logging.
 *
 * @param dbName    the old database name
 * @param newDbName the new database name
 */
public void replayRenameDatabase(String dbName, String newDbName) {
    // Blocking acquire: replay must proceed.
    tryLock(true);
    try {
        Database db = fullNameToDb.get(dbName);
        Cluster cluster = defaultCluster;
        cluster.removeDb(db.getFullName(), db.getId());
        db.setName(newDbName);
        cluster.addDb(newDbName, db.getId());
        fullNameToDb.remove(dbName);
        fullNameToDb.put(newDbName, db);
        LOG.info("replay rename database {} to {}, id: {}", dbName, newDbName, db.getId());
    } finally {
        unlock();
    }
}
/**
 * Following is the step to create an olap table:
 * 1. create columns
 * 2. create partition info
 * 3. create distribution info
 * 4. set table id and base index id
 * 5. set bloom filter columns
 * 6. set and build TableProperty includes:
 * 6.1. dynamicProperty
 * 6.2. replicationNum
 * 6.3. inMemory
 * 6.4. storageFormat
 * 7. set index meta
 * 8. check colocation properties
 * 9. create tablet in BE
 * 10. add this table to FE's meta
 * 11. add this table to ColocateGroup if necessary
 */
@Override
public void createTable(CreateTableStmt stmt) throws DdlException {
    String engineName = stmt.getEngineName();
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    Database db = getDb(stmt.getDbName());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    // Capacity/quota checks apply only to tables whose data lives in this cluster.
    if (!stmt.isExternal()) {
        systemInfoService.checkClusterCapacity();
        db.checkQuota();
    }
    // Pre-check for name collision under a read lock; the per-engine creation
    // paths below perform their own definitive checks.
    db.readLock();
    try {
        if (db.getTable(tableName) != null) {
            if (stmt.isSetIfNotExists()) {
                LOG.info("create table[{}] which already exists", tableName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
            }
        }
    } finally {
        db.readUnlock();
    }
    // Dispatch on engine type; every branch either returns or throws.
    if (stmt.isOlapOrLakeEngine()) {
        createOlapOrLakeTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("mysql")) {
        createMysqlTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("elasticsearch") || engineName.equalsIgnoreCase("es")) {
        createEsTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("hive")) {
        createHiveTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("iceberg")) {
        createIcebergTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("hudi")) {
        createHudiTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("jdbc")) {
        createJDBCTable(db, stmt);
        return;
    } else {
        ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_STORAGE_ENGINE, engineName);
    }
    // Defensive: unreachable because every branch above returns or throws, but
    // the compiler cannot prove reportDdlException always throws.
    Preconditions.checkState(false);
}
/**
 * Implements CREATE TABLE LIKE: regenerates the source table's CREATE TABLE
 * statement, re-parses and re-analyzes it, retargets it at the new table name,
 * then runs it through the normal {@link #createTable} path.
 *
 * @param stmt the CREATE TABLE LIKE statement
 * @throws DdlException if the source table is missing, is a view, or creation fails
 */
public void createTableLike(CreateTableLikeStmt stmt) throws DdlException {
    try {
        Database db = getDb(stmt.getExistedDbName());
        List<String> createTableStmt = Lists.newArrayList();
        db.readLock();
        try {
            Table table = db.getTable(stmt.getExistedTableName());
            if (table == null) {
                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, stmt.getExistedTableName());
            }
            // Render the source table's DDL into createTableStmt.
            GlobalStateMgr.getDdlStmt(stmt.getDbName(), table, createTableStmt, null, null, false, false);
            if (createTableStmt.isEmpty()) {
                ErrorReport.reportDdlException(ErrorCode.ERROR_CREATE_TABLE_LIKE_EMPTY, "CREATE");
            }
        } finally {
            db.readUnlock();
        }
        // Round-trip the generated DDL through the parser/analyzer so it is
        // validated exactly like user-supplied SQL.
        StatementBase statementBase = com.starrocks.sql.parser.SqlParser.parse(createTableStmt.get(0),
                ConnectContext.get().getSessionVariable().getSqlMode()).get(0);
        com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, ConnectContext.get());
        if (statementBase instanceof CreateTableStmt) {
            CreateTableStmt parsedCreateTableStmt = (CreateTableStmt) statementBase;
            parsedCreateTableStmt.setTableName(stmt.getTableName());
            if (stmt.isSetIfNotExists()) {
                parsedCreateTableStmt.setIfNotExists();
            }
            createTable(parsedCreateTableStmt);
        } else if (statementBase instanceof CreateViewStmt) {
            ErrorReport.reportDdlException(ErrorCode.ERROR_CREATE_TABLE_LIKE_UNSUPPORTED_VIEW);
        }
    } catch (UserException e) {
        throw new DdlException("Failed to execute CREATE TABLE LIKE " + stmt.getExistedTableName() + ". Reason: " +
                e.getMessage(), e);
    }
}
@Override
/**
 * Entry point for ALTER TABLE ... ADD PARTITION. Single range/list partition
 * descriptors are forwarded directly; a MultiRangePartitionDesc is first
 * expanded into a list of single-range descriptors and then forwarded.
 *
 * @param db                 target database
 * @param tableName          target table name
 * @param addPartitionClause the parsed ADD PARTITION clause
 * @throws DdlException      on validation failures
 * @throws AnalysisException on descriptor analysis failures
 */
public void addPartitions(Database db, String tableName, AddPartitionClause addPartitionClause)
        throws DdlException, AnalysisException {
    PartitionDesc partitionDesc = addPartitionClause.getPartitionDesc();
    if (partitionDesc instanceof SingleItemListPartitionDesc
            || partitionDesc instanceof MultiItemListPartitionDesc
            || partitionDesc instanceof SingleRangePartitionDesc) {
        addPartitions(db, tableName, ImmutableList.of(partitionDesc),
                addPartitionClause);
    } else if (partitionDesc instanceof MultiRangePartitionDesc) {
        // Snapshot the partition info and table properties under the read lock;
        // expansion happens after the lock is released.
        db.readLock();
        RangePartitionInfo rangePartitionInfo;
        Map<String, String> tableProperties;
        try {
            Table table = db.getTable(tableName);
            CatalogUtils.checkTableExist(db, tableName);
            CatalogUtils.checkNativeTable(db, table);
            OlapTable olapTable = (OlapTable) table;
            tableProperties = olapTable.getTableProperty().getProperties();
            PartitionInfo partitionInfo = olapTable.getPartitionInfo();
            rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        } finally {
            db.readUnlock();
        }
        if (rangePartitionInfo == null) {
            throw new DdlException("Alter batch get partition info failed.");
        }
        List<Column> partitionColumns = rangePartitionInfo.getPartitionColumns();
        // Batch expansion (START/END/EVERY) only works over one range column.
        if (partitionColumns.size() != 1) {
            throw new DdlException("Alter batch build partition only support single range column.");
        }
        Column firstPartitionColumn = partitionColumns.get(0);
        MultiRangePartitionDesc multiRangePartitionDesc = (MultiRangePartitionDesc) partitionDesc;
        Map<String, String> properties = addPartitionClause.getProperties();
        if (properties == null) {
            properties = Maps.newHashMap();
        }
        // Week-based expansion must honor the table's configured first day of week.
        if (tableProperties != null && tableProperties.containsKey(DynamicPartitionProperty.START_DAY_OF_WEEK)) {
            properties.put(DynamicPartitionProperty.START_DAY_OF_WEEK,
                    tableProperties.get(DynamicPartitionProperty.START_DAY_OF_WEEK));
        }
        List<SingleRangePartitionDesc> singleRangePartitionDescs = multiRangePartitionDesc
                .convertToSingle(firstPartitionColumn.getType(), properties);
        // Upcast List<SingleRangePartitionDesc> to List<PartitionDesc>.
        List<PartitionDesc> partitionDescs = singleRangePartitionDescs.stream().map(item -> {
            PartitionDesc desc = item;
            return desc;
        }).collect(Collectors.toList());
        addPartitions(db, tableName, partitionDescs, addPartitionClause);
    }
}
/**
 * Validates that the named table exists, is a native (OLAP/lake) table, and is
 * in a mutable state, then returns it as an OlapTable.
 *
 * @param db        database to look the table up in
 * @param tableName name of the table to validate
 * @return the validated table
 * @throws DdlException if any validation fails
 */
private OlapTable checkTable(Database db, String tableName) throws DdlException {
    CatalogUtils.checkTableExist(db, tableName);
    Table candidate = db.getTable(tableName);
    CatalogUtils.checkNativeTable(db, candidate);
    OlapTable validated = (OlapTable) candidate;
    CatalogUtils.checkTableState(validated, tableName);
    return validated;
}
/**
 * Rejects partition-info types other than RANGE and LIST — the only kinds of
 * partitioned tables that support ADD PARTITION.
 *
 * @param partitionInfo the table's partition info
 * @throws DdlException if the partition type is unsupported
 */
private void checkPartitionType(PartitionInfo partitionInfo) throws DdlException {
    PartitionType type = partitionInfo.getType();
    boolean supported = (type == PartitionType.RANGE) || (type == PartitionType.LIST);
    if (!supported) {
        throw new DdlException("Only support adding partition to range/list partitioned table");
    }
}
/**
 * Analyzes each partition descriptor against the table's partition info,
 * merging table-, clause-, and per-partition-level properties (later levels
 * override earlier ones), and validates ranges/list values for collisions.
 *
 * @param olapTable          target table
 * @param partitionDescs     the descriptors to analyze
 * @param addPartitionClause the originating clause (temp flag, properties)
 * @param partitionInfo      the table's partition info
 * @throws DdlException            on unsupported descriptor types
 * @throws AnalysisException       on invalid descriptor contents
 * @throws NotImplementedException propagated from descriptor analysis
 */
private void analyzeAddPartition(OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                 AddPartitionClause addPartitionClause, PartitionInfo partitionInfo)
        throws DdlException, AnalysisException, NotImplementedException {
    Set<String> existPartitionNameSet =
            CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, partitionDescs);
    // Property precedence: table defaults < clause-level < per-partition.
    Map<String, String> properties = olapTable.getProperties();
    Map<String, String> clauseProperties = addPartitionClause.getProperties();
    if (clauseProperties != null && !clauseProperties.isEmpty()) {
        properties.putAll(clauseProperties);
    }
    for (PartitionDesc partitionDesc : partitionDescs) {
        // Copy per descriptor so one partition's overrides don't leak into the next.
        Map<String, String> cloneProperties = Maps.newHashMap(properties);
        Map<String, String> sourceProperties = partitionDesc.getProperties();
        if (sourceProperties != null && !sourceProperties.isEmpty()) {
            cloneProperties.putAll(sourceProperties);
        }
        if (partitionDesc instanceof SingleRangePartitionDesc) {
            RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
            SingleRangePartitionDesc singleRangePartitionDesc = ((SingleRangePartitionDesc) partitionDesc);
            singleRangePartitionDesc.analyze(rangePartitionInfo.getPartitionColumns().size(), cloneProperties);
            // Range-collision check is skipped for partitions that already exist
            // (they are reported, not re-created).
            if (!existPartitionNameSet.contains(singleRangePartitionDesc.getPartitionName())) {
                rangePartitionInfo.checkAndCreateRange(singleRangePartitionDesc,
                        addPartitionClause.isTempPartition());
            }
        } else if (partitionDesc instanceof SingleItemListPartitionDesc
                || partitionDesc instanceof MultiItemListPartitionDesc) {
            List<ColumnDef> columnDefList = partitionInfo.getPartitionColumns().stream()
                    .map(item -> new ColumnDef(item.getName(), new TypeDef(item.getType())))
                    .collect(Collectors.toList());
            partitionDesc.analyze(columnDefList, cloneProperties);
            CatalogUtils.checkPartitionValuesExistForAddListPartition(olapTable, partitionDesc);
        } else {
            throw new DdlException("Only support adding partition to range/list partitioned table");
        }
    }
}
/**
 * Resolves the distribution info for new partitions: uses the clause's
 * distribution if given (validated against the table's default), otherwise
 * falls back to the table default.
 *
 * @param olapTable          target table
 * @param addPartitionClause clause possibly carrying an explicit distribution
 * @return the distribution info to use for the new partitions
 * @throws DdlException if the explicit distribution conflicts with the default
 */
private DistributionInfo getDistributionInfo(OlapTable olapTable, AddPartitionClause addPartitionClause)
        throws DdlException {
    DistributionInfo distributionInfo;
    List<Column> baseSchema = olapTable.getBaseSchema();
    DistributionInfo defaultDistributionInfo = olapTable.getDefaultDistributionInfo();
    DistributionDesc distributionDesc = addPartitionClause.getDistributionDesc();
    if (distributionDesc != null) {
        distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
        // New partitions must distribute the same way the table already does.
        if (distributionInfo.getType() != defaultDistributionInfo.getType()) {
            throw new DdlException("Cannot assign different distribution type. default is: "
                    + defaultDistributionInfo.getType());
        }
        if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.HASH) {
            HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
            List<Column> newDistriCols = hashDistributionInfo.getDistributionColumns();
            List<Column> defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo)
                    .getDistributionColumns();
            if (!newDistriCols.equals(defaultDistriCols)) {
                throw new DdlException("Cannot assign hash distribution with different distribution cols. "
                        + "default is: " + defaultDistriCols);
            }
            if (hashDistributionInfo.getBucketNum() <= 0) {
                throw new DdlException("Cannot assign hash distribution buckets less than 1");
            }
        }
    } else {
        distributionInfo = defaultDistributionInfo;
    }
    return distributionInfo;
}
/**
 * For colocated tables, verifies that the new partitions' distribution and
 * replication settings match the colocate group's schema. No-op otherwise.
 *
 * @param db               owning database (its id prefixes the group name)
 * @param olapTable        target table
 * @param distributionInfo distribution to be used for the new partitions
 * @param partitionDescs   descriptors whose replication numbers are checked
 * @throws DdlException if any setting violates the group schema
 */
private void checkColocation(Database db, OlapTable olapTable, DistributionInfo distributionInfo,
                             List<PartitionDesc> partitionDescs)
        throws DdlException {
    if (!colocateTableIndex.isColocateTable(olapTable.getId())) {
        return;
    }
    // Group names are scoped by database id.
    String qualifiedGroupName = db.getId() + "_" + olapTable.getColocateGroup();
    ColocateGroupSchema schema = colocateTableIndex.getGroupSchema(qualifiedGroupName);
    Preconditions.checkNotNull(schema);
    schema.checkDistribution(distributionInfo);
    for (PartitionDesc desc : partitionDescs) {
        schema.checkReplicationNum(desc.getReplicationNum());
    }
}
/**
 * Asserts that every partition descriptor carries a non-null data property
 * (analysis should have filled these in earlier).
 *
 * @param partitionDescs descriptors to check
 */
private void checkDataProperty(List<PartitionDesc> partitionDescs) {
    partitionDescs.forEach(desc -> Preconditions.checkNotNull(desc.getPartitionDataProperty()));
}
/**
 * Creates Partition objects (and their tablets) for each descriptor on a
 * copied table, recording per-partition and overall tablet-id sets so failed
 * additions can be cleaned up later.
 *
 * @param db                       owning database
 * @param copiedTable              detached copy of the table; its partition
 *                                 info is mutated with per-partition settings
 * @param partitionDescs           analyzed descriptors to materialize
 * @param partitionNameToTabletSet out-param: partition name -> its tablet ids
 * @param tabletIdSetForAll        out-param: union of all created tablet ids
 * @return the created partitions, in descriptor order
 * @throws DdlException if partition creation fails
 */
private List<Partition> createPartitionList(Database db, OlapTable copiedTable, List<PartitionDesc> partitionDescs,
                                            HashMap<String, Set<Long>> partitionNameToTabletSet,
                                            Set<Long> tabletIdSetForAll)
        throws DdlException {
    List<Partition> partitionList = Lists.newArrayListWithCapacity(partitionDescs.size());
    for (PartitionDesc partitionDesc : partitionDescs) {
        long partitionId = getNextId();
        DataProperty dataProperty = partitionDesc.getPartitionDataProperty();
        String partitionName = partitionDesc.getPartitionName();
        Long version = partitionDesc.getVersionInfo();
        Set<Long> tabletIdSet = Sets.newHashSet();
        // Stage all per-partition settings on the copy before building tablets.
        copiedTable.getPartitionInfo().setDataProperty(partitionId, dataProperty);
        copiedTable.getPartitionInfo().setTabletType(partitionId, partitionDesc.getTabletType());
        copiedTable.getPartitionInfo()
                .setReplicationNum(partitionId, partitionDesc.getReplicationNum());
        copiedTable.getPartitionInfo().setIsInMemory(partitionId, partitionDesc.isInMemory());
        copiedTable.getPartitionInfo().setStorageInfo(partitionId, partitionDesc.getStorageInfo());
        Partition partition =
                createPartition(db, copiedTable, partitionId, partitionName, version, tabletIdSet);
        partitionList.add(partition);
        tabletIdSetForAll.addAll(tabletIdSet);
        partitionNameToTabletSet.put(partitionName, tabletIdSet);
    }
    return partitionList;
}
/**
 * Detects whether the table's index metadata changed between the time it was
 * copied (before tablet creation, outside the lock) and now (under the write
 * lock): differing index counts, a missing index id, or a schema-hash mismatch
 * all count as a change.
 *
 * @param olapTable   the current, live table
 * @param copiedTable the earlier detached copy
 * @param tableName   name used in the error message
 * @throws DdlException if the metadata diverged
 */
private void checkIfMetaChange(OlapTable olapTable, OlapTable copiedTable, String tableName) throws DdlException {
    boolean diverged = olapTable.getIndexNameToId().size() != copiedTable.getIndexNameToId().size();
    if (!diverged) {
        for (Map.Entry<Long, MaterializedIndexMeta> entry : olapTable.getIndexIdToMeta().entrySet()) {
            long indexId = entry.getKey();
            if (!copiedTable.getIndexIdToMeta().containsKey(indexId)
                    || copiedTable.getIndexIdToMeta().get(indexId).getSchemaHash()
                            != entry.getValue().getSchemaHash()) {
                diverged = true;
                break;
            }
        }
    }
    if (diverged) {
        throw new DdlException("Table[" + tableName + "]'s meta has been changed. try again.");
    }
}
/**
 * Commits the new partitions into the live table under the caller's write
 * lock: registers their descriptors with the range/list partition info, then
 * attaches each partition (temp or normal), skipping names that already exist.
 *
 * @param partitionInfo         the live table's partition info
 * @param partitionList         newly built partitions
 * @param partitionDescs        their analyzed descriptors
 * @param existPartitionNameSet names that already exist and must be skipped
 * @param addPartitionClause    source clause (temp-partition flag)
 * @param olapTable             the live table
 * @throws DdlException if the partition type is unsupported
 */
private void updatePartitionInfo(PartitionInfo partitionInfo, List<Partition> partitionList,
                                 List<PartitionDesc> partitionDescs, Set<String> existPartitionNameSet,
                                 AddPartitionClause addPartitionClause, OlapTable olapTable)
        throws DdlException {
    boolean isTempPartition = addPartitionClause.isTempPartition();
    if (partitionInfo instanceof RangePartitionInfo) {
        RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        rangePartitionInfo.handleNewRangePartitionDescs(partitionDescs,
                partitionList, existPartitionNameSet, isTempPartition);
    } else if (partitionInfo instanceof ListPartitionInfo) {
        ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo;
        listPartitionInfo.handleNewListPartitionDescs(partitionDescs,
                partitionList, existPartitionNameSet, isTempPartition);
    } else {
        throw new DdlException("Only support adding partition to range/list partitioned table");
    }
    if (isTempPartition) {
        for (Partition partition : partitionList) {
            if (!existPartitionNameSet.contains(partition.getName())) {
                olapTable.addTempPartition(partition);
            }
        }
    } else {
        for (Partition partition : partitionList) {
            if (!existPartitionNameSet.contains(partition.getName())) {
                olapTable.addPartition(partition);
            }
        }
    }
}
/**
 * Writes edit-log records for newly added range partitions. Lake tables log
 * the V2 persist format; local tables log the legacy format. The single- and
 * multi-partition paths differ only in batching; partitions whose names
 * already existed are skipped.
 *
 * @param db                    owning database
 * @param olapTable             target table (lake vs. local decides the format)
 * @param partitionDescs        descriptors, index-aligned with partitionList
 * @param addPartitionClause    source clause (temp flag)
 * @param partitionInfo         the table's range partition info
 * @param partitionList         the partitions that were added
 * @param existPartitionNameSet names that already existed (not logged)
 */
private void addRangePartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                  AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                                  List<Partition> partitionList, Set<String> existPartitionNameSet) {
    boolean isTempPartition = addPartitionClause.isTempPartition();
    int partitionLen = partitionList.size();
    List<PartitionPersistInfoV2> partitionInfoV2List = Lists.newArrayListWithCapacity(partitionLen);
    if (partitionLen == 1) {
        Partition partition = partitionList.get(0);
        if (existPartitionNameSet.contains(partition.getName())) {
            LOG.info("add partition[{}] which already exists", partition.getName());
            return;
        }
        long partitionId = partition.getId();
        if (olapTable.isLakeTable()) {
            // Lake tables always use the V2 persist format (carries storage info).
            PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                    partitionDescs.get(0).getPartitionDataProperty(), partitionInfo.getReplicationNum(partition.getId()),
                    partitionInfo.getIsInMemory(partition.getId()), isTempPartition,
                    ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                    ((SingleRangePartitionDesc) partitionDescs.get(0)).getStorageInfo());
            partitionInfoV2List.add(info);
            AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List);
            editLog.logAddPartitions(infos);
        } else {
            PartitionPersistInfo info = new PartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                    ((RangePartitionInfo) partitionInfo).getRange(partitionId),
                    partitionDescs.get(0).getPartitionDataProperty(),
                    partitionInfo.getReplicationNum(partitionId),
                    partitionInfo.getIsInMemory(partitionId),
                    isTempPartition);
            editLog.logAddPartition(info);
        }
        LOG.info("succeed in creating partition[{}], name: {}, temp: {}", partitionId,
                partition.getName(), isTempPartition);
    } else {
        // Batch path: collect persist infos for all genuinely new partitions,
        // then log them in one edit-log entry.
        List<PartitionPersistInfo> partitionInfoList = Lists.newArrayListWithCapacity(partitionLen);
        for (int i = 0; i < partitionLen; i++) {
            Partition partition = partitionList.get(i);
            if (!existPartitionNameSet.contains(partition.getName())) {
                if (olapTable.isLakeTable()) {
                    PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(),
                            partition, partitionDescs.get(i).getPartitionDataProperty(),
                            partitionInfo.getReplicationNum(partition.getId()),
                            partitionInfo.getIsInMemory(partition.getId()), isTempPartition,
                            ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                            ((SingleRangePartitionDesc) partitionDescs.get(i)).getStorageInfo());
                    partitionInfoV2List.add(info);
                } else {
                    PartitionPersistInfo info =
                            new PartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                                    ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                                    partitionDescs.get(i).getPartitionDataProperty(),
                                    partitionInfo.getReplicationNum(partition.getId()),
                                    partitionInfo.getIsInMemory(partition.getId()),
                                    isTempPartition);
                    partitionInfoList.add(info);
                }
            }
        }
        if (olapTable.isLakeTable()) {
            AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List);
            editLog.logAddPartitions(infos);
        } else {
            AddPartitionsInfo infos = new AddPartitionsInfo(partitionInfoList);
            editLog.logAddPartitions(infos);
        }
        for (Partition partition : partitionList) {
            LOG.info("succeed in creating partitions[{}], name: {}, temp: {}", partition.getId(),
                    partition.getName(), isTempPartition);
        }
    }
}
/**
 * Writes the edit-log record for a newly added list partition. Only a single
 * partition per statement is currently supported for list partitioning.
 *
 * @param db                    owning database
 * @param olapTable             target table
 * @param partitionDescs        descriptors (exactly one expected)
 * @param addPartitionClause    source clause (temp flag)
 * @param partitionInfo         the table's list partition info
 * @param partitionList         must contain exactly one partition
 * @param existPartitionNameSet names that already existed (not logged)
 * @throws DdlException if more than one partition was supplied
 */
private void addListPartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                 AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                                 List<Partition> partitionList, Set<String> existPartitionNameSet)
        throws DdlException {
    if (partitionList == null || partitionList.size() != 1) {
        throw new DdlException("Only support add one partition when add list partition now");
    }
    boolean isTempPartition = addPartitionClause.isTempPartition();
    Partition partition = partitionList.get(0);
    if (existPartitionNameSet.contains(partition.getName())) {
        LOG.info("add partition[{}] which already exists", partition.getName());
        return;
    }
    long partitionId = partition.getId();
    // List partitions always persist in the V2 format, carrying both the
    // single-value and the multi-value representations.
    PartitionPersistInfoV2 info = new ListPartitionPersistInfo(db.getId(), olapTable.getId(), partition,
            partitionDescs.get(0).getPartitionDataProperty(),
            partitionInfo.getReplicationNum(partitionId),
            partitionInfo.getIsInMemory(partitionId),
            isTempPartition,
            ((ListPartitionInfo) partitionInfo).getIdToValues().get(partitionId),
            ((ListPartitionInfo) partitionInfo).getIdToMultiValues().get(partitionId));
    editLog.logAddPartition(info);
    LOG.info("succeed in creating list partition[{}], name: {}, temp: {}", partitionId,
            partition.getName(), isTempPartition);
}
/**
 * Dispatches edit-log writing for newly added partitions to the range- or
 * list-specific logger based on the partition type.
 *
 * @throws DdlException if the partition type is neither RANGE nor LIST
 */
private void addPartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                             AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                             List<Partition> partitionList, Set<String> existPartitionNameSet)
        throws DdlException {
    switch (partitionInfo.getType()) {
        case RANGE:
            addRangePartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
            break;
        case LIST:
            addListPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
            break;
        default:
            throw new DdlException("Only support adding partition log to range/list partitioned table");
    }
}
/**
 * Removes from the inverted index the tablets that were speculatively created
 * for partitions whose names turned out to already exist.
 *
 * @param existPartitionNameSet    names of the partitions that already existed
 * @param partitionNameToTabletSet partition name -> tablet ids built for it
 */
private void cleanExistPartitionNameSet(Set<String> existPartitionNameSet,
                                        HashMap<String, Set<Long>> partitionNameToTabletSet) {
    for (String existingName : existPartitionNameSet) {
        Set<Long> orphanedTablets = partitionNameToTabletSet.get(existingName);
        if (orphanedTablets != null) {
            for (Long orphanedTabletId : orphanedTablets) {
                GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(orphanedTabletId);
            }
        }
    }
}
/**
 * Rolls back all tablets created during a failed add-partitions attempt:
 * removes them from the inverted index and, for lake tables, queues the
 * corresponding shard ids for deletion.
 *
 * @param tabletIdSetForAll every tablet id created in the attempt
 * @param isLakeTable       whether the table is a lake table
 */
private void cleanTabletIdSetForAll(Set<Long> tabletIdSetForAll, boolean isLakeTable) {
    tabletIdSetForAll.forEach(tabletId -> GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId));
    if (isLakeTable) {
        stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSetForAll);
    }
}
/**
 * Core add-partitions workflow, split into two lock phases:
 * phase 1 (read lock): validate the table, analyze descriptors, and snapshot a
 * detached copy; then — outside any lock — build partitions and tablets;
 * phase 2 (write lock): re-validate that the table meta did not change, commit
 * the partitions into the live table, and write the edit log.
 * On failure, all speculatively created tablets are rolled back.
 *
 * @param db                 owning database
 * @param tableName          target table name
 * @param partitionDescs     analyzed-or-to-be-analyzed partition descriptors
 * @param addPartitionClause the originating clause
 * @throws DdlException on any validation or creation failure
 */
private void addPartitions(Database db, String tableName, List<PartitionDesc> partitionDescs,
                           AddPartitionClause addPartitionClause) throws DdlException {
    DistributionInfo distributionInfo;
    OlapTable olapTable;
    OlapTable copiedTable;
    db.readLock();
    try {
        olapTable = checkTable(db, tableName);
        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
        checkPartitionType(partitionInfo);
        analyzeAddPartition(olapTable, partitionDescs, addPartitionClause, partitionInfo);
        distributionInfo = getDistributionInfo(olapTable, addPartitionClause);
        checkColocation(db, olapTable, distributionInfo, partitionDescs);
        // Work on a detached copy so tablet creation can run without the lock.
        copiedTable = olapTable.selectiveCopy(null, false, MaterializedIndex.IndexExtState.VISIBLE);
        copiedTable.setDefaultDistributionInfo(distributionInfo);
    } catch (AnalysisException | NotImplementedException e) {
        throw new DdlException(e.getMessage(), e);
    } finally {
        db.readUnlock();
    }
    Preconditions.checkNotNull(distributionInfo);
    Preconditions.checkNotNull(olapTable);
    Preconditions.checkNotNull(copiedTable);
    checkDataProperty(partitionDescs);
    Set<Long> tabletIdSetForAll = Sets.newHashSet();
    HashMap<String, Set<Long>> partitionNameToTabletSet = Maps.newHashMap();
    try {
        // Heavy work (tablet/replica creation on BEs) happens lock-free here.
        List<Partition> partitionList =
                createPartitionList(db, copiedTable, partitionDescs, partitionNameToTabletSet, tabletIdSetForAll);
        buildPartitions(db, copiedTable, partitionList);
        db.writeLock();
        Set<String> existPartitionNameSet = Sets.newHashSet();
        try {
            // Re-validate under the write lock: the table may have changed
            // while we were building tablets.
            olapTable = checkTable(db, tableName);
            existPartitionNameSet = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable,
                    partitionDescs);
            if (existPartitionNameSet.size() > 0) {
                for (String partitionName : existPartitionNameSet) {
                    LOG.info("add partition[{}] which already exists", partitionName);
                }
            }
            checkIfMetaChange(olapTable, copiedTable, tableName);
            PartitionInfo partitionInfo = olapTable.getPartitionInfo();
            checkPartitionType(partitionInfo);
            updatePartitionInfo(partitionInfo, partitionList, partitionDescs, existPartitionNameSet,
                    addPartitionClause, olapTable);
            addPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
        } finally {
            // Tablets built for already-existing partitions are always orphaned.
            cleanExistPartitionNameSet(existPartitionNameSet, partitionNameToTabletSet);
            db.writeUnlock();
        }
    } catch (DdlException e) {
        // Full rollback of every tablet created during this attempt.
        cleanTabletIdSetForAll(tabletIdSetForAll, olapTable.isLakeTable());
        throw e;
    }
}
/**
 * Replays an add-partition edit-log entry in the V2 persist format (range or
 * list): attaches the partition to the table, registers its descriptor with
 * the partition info, and — outside checkpoint threads — rebuilds the tablet
 * inverted index (replicas only for local tablets).
 *
 * @param info the V2 persist record
 * @throws DdlException on unsupported partition types or list-desc errors
 */
public void replayAddPartition(PartitionPersistInfoV2 info) throws DdlException {
    Database db = this.getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
        Partition partition = info.getPartition();
        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
        if (info.isTempPartition()) {
            olapTable.addTempPartition(partition);
        } else {
            olapTable.addPartition(partition);
        }
        PartitionType partitionType = partitionInfo.getType();
        if (partitionType == PartitionType.LIST) {
            try {
                ((ListPartitionInfo) partitionInfo).unprotectHandleNewPartitionDesc(
                        info.asListPartitionPersistInfo());
            } catch (AnalysisException e) {
                throw new DdlException(e.getMessage());
            }
        } else if (partitionType == PartitionType.RANGE) {
            ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(
                    info.asRangePartitionPersistInfo());
        } else {
            throw new DdlException("Only support adding partition to range/list partitioned table");
        }
        // The checkpoint thread only serializes state; it must not touch the
        // live inverted index.
        if (!isCheckpointThread()) {
            TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
            for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                long indexId = index.getId();
                int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(),
                        index.getId(), schemaHash, info.getDataProperty().getStorageMedium());
                for (Tablet tablet : index.getTablets()) {
                    long tabletId = tablet.getId();
                    invertedIndex.addTablet(tabletId, tabletMeta);
                    // Only local tablets carry replicas; lake tablets do not.
                    if (tablet instanceof LocalTablet) {
                        for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                            invertedIndex.addReplica(tabletId, replica);
                        }
                    }
                }
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays an "add partition" edit-log entry (legacy V1 persist format) on a
 * follower or during image replay. Mirrors the in-memory mutations of the
 * original DDL without writing a new log entry.
 *
 * @param info the persisted partition info read from the edit log
 * @throws DdlException declared for interface compatibility with the V2 overload
 */
public void replayAddPartition(PartitionPersistInfo info) throws DdlException {
        Database db = this.getDb(info.getDbId());
        db.writeLock();
        try {
            OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
            Partition partition = info.getPartition();
            PartitionInfo partitionInfo = olapTable.getPartitionInfo();
            if (info.isTempPartition()) {
                olapTable.addTempPartition(partition);
            } else {
                olapTable.addPartition(partition);
            }
            if (partitionInfo.getType() == PartitionType.RANGE) {
                ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(partition.getId(),
                        info.isTempPartition(), info.getRange(), info.getDataProperty(), info.getReplicationNum(),
                        info.isInMemory());
            } else {
                partitionInfo.addPartition(
                        partition.getId(), info.getDataProperty(), info.getReplicationNum(), info.isInMemory());
            }
            // The checkpoint thread only rebuilds the image; it must not touch the
            // live tablet inverted index.
            if (!isCheckpointThread()) {
                TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
                for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                    long indexId = index.getId();
                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                    TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(),
                            index.getId(), schemaHash, info.getDataProperty().getStorageMedium());
                    for (Tablet tablet : index.getTablets()) {
                        long tabletId = tablet.getId();
                        invertedIndex.addTablet(tabletId, tabletMeta);
                        // Guard the cast the same way the V2 replay path does: only
                        // local tablets carry replicas to register in the index. The
                        // previous unconditional cast would throw ClassCastException
                        // on a non-local tablet.
                        if (tablet instanceof LocalTablet) {
                            for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                                invertedIndex.addReplica(tabletId, replica);
                            }
                        }
                    }
                }
            }
        } finally {
            db.writeUnlock();
        }
    }
/**
 * Replays a "drop partition" edit-log entry. Temp partitions are removed
 * directly; regular partitions go through the table's drop path, which reports
 * the tablet ids whose backing shards can now be reclaimed.
 */
public void replayDropPartition(DropPartitionInfo info) {
        Database db = this.getDb(info.getDbId());
        db.writeLock();
        Set<Long> droppedTabletIds = new HashSet<Long>();
        try {
            OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
            if (info.isTempPartition()) {
                olapTable.dropTempPartition(info.getPartitionName(), true);
            } else {
                droppedTabletIds = olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop());
            }
            // Hand any now-unused shard ids to the deleter for asynchronous cleanup.
            if (!droppedTabletIds.isEmpty()) {
                stateMgr.getShardManager().getShardDeleter().addUnusedShardId(droppedTabletIds);
            }
        } finally {
            db.writeUnlock();
        }
    }
// Replays a "partition erased from recycle bin" edit-log entry by delegating to
// the recycle bin's own replay handler.
public void replayErasePartition(long partitionId) throws DdlException {
        recycleBin.replayErasePartition(partitionId);
    }
/**
 * Replays a "recover partition" edit-log entry: restores a partition from the
 * recycle bin into its owning table, under the database write lock.
 */
public void replayRecoverPartition(RecoverInfo info) {
        Database database = getDb(info.getDbId());
        database.writeLock();
        try {
            Table target = database.getTable(info.getTableId());
            recycleBin.replayRecoverPartition((OlapTable) target, info.getPartitionId());
        } finally {
            database.writeUnlock();
        }
    }
// Convenience overload of createPartitionCommon that uses the table's own
// PartitionInfo. Any tablet ids created along the way are added to tabletIdSet
// so the caller can roll them back on failure.
private Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName,
                                      Long version, Set<Long> tabletIdSet) throws DdlException {
        return createPartitionCommon(db, table, partitionId, partitionName, table.getPartitionInfo(), version,
                tabletIdSet);
    }
/**
 * Builds the in-memory Partition object for a table: one MaterializedIndex per
 * index meta, plus the tablets (lake or local) for each index.
 *
 * @param partitionInfo partition info to read replication number / storage medium from
 * @param version       if non-null, the partition's initial visible version
 * @param tabletIdSet   out-param collecting every created tablet id for rollback
 * @throws DdlException if tablet creation fails
 */
private Partition createPartitionCommon(Database db, OlapTable table, long partitionId, String partitionName,
                                            PartitionInfo partitionInfo, Long version, Set<Long> tabletIdSet)
            throws DdlException {
        // One (initially empty) materialized index per index meta, all in NORMAL state.
        Map<Long, MaterializedIndex> indexMap = new HashMap<>();
        for (long indexId : table.getIndexIdToMeta().keySet()) {
            MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL);
            indexMap.put(indexId, rollup);
        }
        DistributionInfo distributionInfo = table.getDefaultDistributionInfo();
        // The base index is passed to the Partition constructor; rollups are attached below.
        Partition partition =
                new Partition(partitionId, partitionName, indexMap.get(table.getBaseIndexId()), distributionInfo);
        if (version != null) {
            partition.updateVisibleVersion(version);
        }
        short replicationNum = partitionInfo.getReplicationNum(partitionId);
        TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium();
        for (Map.Entry<Long, MaterializedIndex> entry : indexMap.entrySet()) {
            long indexId = entry.getKey();
            MaterializedIndex index = entry.getValue();
            MaterializedIndexMeta indexMeta = table.getIndexIdToMeta().get(indexId);
            TabletMeta tabletMeta =
                    new TabletMeta(db.getId(), table.getId(), partitionId, indexId, indexMeta.getSchemaHash(),
                            storageMedium, table.isLakeTable());
            // Lake and local tables create tablets through different paths.
            if (table.isLakeTable()) {
                createLakeTablets((LakeTable) table, partitionId, index, distributionInfo, replicationNum, tabletMeta,
                        tabletIdSet);
            } else {
                createOlapTablets(index, Replica.ReplicaState.NORMAL, distributionInfo,
                        partition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet);
            }
            // The base index is already part of the Partition; only rollups need attaching.
            if (index.getId() != table.getBaseIndexId()) {
                partition.createRollupIndex(index);
            }
        }
        return partition;
    }
/**
 * Creates the physical replicas for the given partitions, choosing a concurrent
 * build for large jobs (>=3 partitions, >=3 alive backends, and at least 500
 * replicas per backend) and a sequential build otherwise.
 *
 * @throws DdlException if no backend is alive or replica creation fails
 */
private void buildPartitions(Database db, OlapTable table, List<Partition> partitions) throws DdlException {
        if (partitions.isEmpty()) {
            return;
        }
        int aliveBackends = systemInfoService.getAliveBackendNumber();
        if (aliveBackends <= 0) {
            throw new DdlException("no alive backend");
        }
        int totalReplicas = 0;
        for (Partition partition : partitions) {
            totalReplicas += partition.getReplicaCount();
        }
        boolean bigJob = partitions.size() >= 3 && aliveBackends >= 3
                && totalReplicas >= aliveBackends * 500;
        if (bigJob) {
            LOG.info("creating {} partitions of table {} concurrently", partitions.size(), table.getName());
            buildPartitionsConcurrently(db.getId(), table, partitions, totalReplicas, aliveBackends);
        } else {
            buildPartitionsSequentially(db.getId(), table, partitions, totalReplicas, aliveBackends);
        }
    }
/**
 * Returns the largest number of tasks assigned to any single backend, or 0 for
 * an empty task list.
 */
private int countMaxTasksPerBackend(List<CreateReplicaTask> tasks) {
        // Guard: Collections.max throws NoSuchElementException on an empty collection.
        if (tasks.isEmpty()) {
            return 0;
        }
        Map<Long, Integer> tasksPerBackend = new HashMap<>();
        for (CreateReplicaTask task : tasks) {
            tasksPerBackend.merge(task.getBackendId(), 1, Integer::sum);
        }
        return Collections.max(tasksPerBackend.values());
    }
/**
 * Creates replicas group by group: partitions are batched so each group puts
 * roughly <= 200 replicas on every backend, and each group's tasks are sent and
 * awaited before the next group starts.
 *
 * @throws DdlException if a group of tasks fails or times out
 */
private void buildPartitionsSequentially(long dbId, OlapTable table, List<Partition> partitions, int numReplicas,
                                             int numBackends) throws DdlException {
        // Group size targets ~200 replicas per backend per round; both Math.max(1, ...)
        // guards keep the divisor and the group size positive.
        int avgReplicasPerPartition = numReplicas / partitions.size();
        int partitionGroupSize = Math.max(1, numBackends * 200 / Math.max(1, avgReplicasPerPartition));
        for (int i = 0; i < partitions.size(); i += partitionGroupSize) {
            int endIndex = Math.min(partitions.size(), i + partitionGroupSize);
            List<CreateReplicaTask> tasks = buildCreateReplicaTasks(dbId, table, partitions.subList(i, endIndex));
            int partitionCount = endIndex - i;
            int indexCountPerPartition = partitions.get(i).getVisibleMaterializedIndicesCount();
            int timeout = Config.tablet_create_timeout_second * countMaxTasksPerBackend(tasks);
            // Cap the timeout so a huge task count cannot stall DDL indefinitely.
            int maxTimeout = partitionCount * indexCountPerPartition * Config.max_create_table_timeout_second;
            try {
                sendCreateReplicaTasksAndWaitForFinished(tasks, Math.min(timeout, maxTimeout));
                // On success the list is emptied so the finally block below has
                // nothing to de-queue; on failure the remaining tasks are removed.
                tasks.clear();
            } finally {
                for (CreateReplicaTask task : tasks) {
                    AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.CREATE, task.getSignature());
                }
            }
        }
    }
/**
 * Creates replicas for many partitions concurrently: a builder thread streams
 * CREATE tasks to backends with a sliding window (at most ~200 outstanding
 * tasks per backend) while the calling thread waits on a shared latch.
 *
 * @throws DdlException if replica creation fails or times out
 */
private void buildPartitionsConcurrently(long dbId, OlapTable table, List<Partition> partitions, int numReplicas,
                                             int numBackends) throws DdlException {
        // Guard against numReplicas < numBackends: integer division would round the
        // per-backend share down to 0 and yield a zero-second timeout (instant failure).
        int timeout = Math.max(1, numReplicas / numBackends) * Config.tablet_create_timeout_second;
        int numIndexes = partitions.stream().mapToInt(Partition::getVisibleMaterializedIndicesCount).sum();
        int maxTimeout = numIndexes * Config.max_create_table_timeout_second;
        MarkedCountDownLatch<Long, Long> countDownLatch = new MarkedCountDownLatch<>(numReplicas);
        Thread t = new Thread(() -> {
            // backendId -> signatures of tasks sent to it, for cleanup on failure.
            Map<Long, List<Long>> taskSignatures = new HashMap<>();
            try {
                int numFinishedTasks;
                int numSendedTasks = 0;
                for (Partition partition : partitions) {
                    // Stop producing as soon as any task has reported failure.
                    if (!countDownLatch.getStatus().ok()) {
                        break;
                    }
                    List<CreateReplicaTask> tasks = buildCreateReplicaTasks(dbId, table, partition);
                    for (CreateReplicaTask task : tasks) {
                        List<Long> signatures =
                                taskSignatures.computeIfAbsent(task.getBackendId(), k -> new ArrayList<>());
                        signatures.add(task.getSignature());
                    }
                    sendCreateReplicaTasks(tasks, countDownLatch);
                    numSendedTasks += tasks.size();
                    numFinishedTasks = numReplicas - (int) countDownLatch.getCount();
                    // Throttle: keep at most ~200 in-flight tasks per backend.
                    while (numSendedTasks - numFinishedTasks > 200 * numBackends) {
                        ThreadUtil.sleepAtLeastIgnoreInterrupts(100);
                        numFinishedTasks = numReplicas - (int) countDownLatch.getCount();
                    }
                }
                countDownLatch.await();
                // On success there is nothing to de-queue in the finally block.
                if (countDownLatch.getStatus().ok()) {
                    taskSignatures.clear();
                }
            } catch (Exception e) {
                LOG.warn(e);
                countDownLatch.countDownToZero(new Status(TStatusCode.UNKNOWN, e.toString()));
            } finally {
                for (Map.Entry<Long, List<Long>> entry : taskSignatures.entrySet()) {
                    for (Long signature : entry.getValue()) {
                        AgentTaskQueue.removeTask(entry.getKey(), TTaskType.CREATE, signature);
                    }
                }
            }
        }, "partition-build");
        t.start();
        try {
            waitForFinished(countDownLatch, Math.min(timeout, maxTimeout));
        } catch (Exception e) {
            // Unblock the builder thread so it can clean up its queued tasks.
            countDownLatch.countDownToZero(new Status(TStatusCode.UNKNOWN, e.getMessage()));
            throw e;
        }
    }
// Collects the CREATE replica tasks for every partition in the given list.
// A plain loop is used (not a stream) because the per-partition overload
// throws the checked DdlException.
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, List<Partition> partitions)
            throws DdlException {
        List<CreateReplicaTask> allTasks = new ArrayList<>();
        for (Partition onePartition : partitions) {
            allTasks.addAll(buildCreateReplicaTasks(dbId, table, onePartition));
        }
        return allTasks;
    }
// Collects the CREATE replica tasks for all visible materialized indexes of one
// partition; the list is pre-sized to the partition's replica count.
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, Partition partition)
            throws DdlException {
        ArrayList<CreateReplicaTask> partitionTasks = new ArrayList<>((int) partition.getReplicaCount());
        for (MaterializedIndex visibleIndex : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) {
            partitionTasks.addAll(buildCreateReplicaTasks(dbId, table, partition, visibleIndex));
        }
        return partitionTasks;
    }
/**
 * Builds the CREATE replica tasks for one materialized index of a partition.
 * Lake tables get one task per tablet, sent to the tablet's primary backend;
 * local tables get one task per replica.
 *
 * @throws DdlException if a lake tablet's primary backend cannot be resolved
 */
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, Partition partition,
                                                            MaterializedIndex index) throws DdlException {
        List<CreateReplicaTask> tasks = new ArrayList<>((int) index.getReplicaCount());
        MaterializedIndexMeta indexMeta = table.getIndexMetaByIndexId(index.getId());
        for (Tablet tablet : index.getTablets()) {
            if (table.isLakeTable()) {
                long primaryBackendId = -1;
                try {
                    primaryBackendId = ((LakeTablet) tablet).getPrimaryBackendId();
                } catch (UserException e) {
                    throw new DdlException(e.getMessage());
                }
                tasks.add(newCreateReplicaTask(primaryBackendId, dbId, table, partition, index, indexMeta,
                        tablet.getId(), TTabletType.TABLET_TYPE_LAKE));
            } else {
                for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                    tasks.add(newCreateReplicaTask(replica.getBackendId(), dbId, table, partition, index, indexMeta,
                            tablet.getId(), table.getPartitionInfo().getTabletType(partition.getId())));
                }
            }
        }
        return tasks;
    }

    // Assembles a single CreateReplicaTask. All arguments other than the target
    // backend and the tablet type are identical for the lake and local paths, so
    // the previously duplicated 20-argument constructor call lives here once.
    private CreateReplicaTask newCreateReplicaTask(long backendId, long dbId, OlapTable table, Partition partition,
                                                   MaterializedIndex index, MaterializedIndexMeta indexMeta,
                                                   long tabletId, TTabletType tabletType) {
        return new CreateReplicaTask(
                backendId,
                dbId,
                table.getId(),
                partition.getId(),
                index.getId(),
                tabletId,
                indexMeta.getShortKeyColumnCount(),
                indexMeta.getSchemaHash(),
                partition.getVisibleVersion(),
                indexMeta.getKeysType(),
                indexMeta.getStorageType(),
                table.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(),
                indexMeta.getSchema(),
                table.getBfColumns(),
                table.getBfFpp(),
                null,
                table.getIndexes(),
                table.getPartitionInfo().getIsInMemory(partition.getId()),
                table.enablePersistentIndex(),
                tabletType,
                table.getCompressionType());
    }
// Sends all tasks to their backends and blocks until every one has reported
// completion, or until `timeout` seconds elapse (then a DdlException is thrown
// by waitForFinished).
private void sendCreateReplicaTasksAndWaitForFinished(List<CreateReplicaTask> tasks, long timeout)
            throws DdlException {
        MarkedCountDownLatch<Long, Long> countDownLatch = new MarkedCountDownLatch<>(tasks.size());
        sendCreateReplicaTasks(tasks, countDownLatch);
        waitForFinished(countDownLatch, timeout);
    }
/**
 * Registers each task's tablet on the latch, groups tasks into one
 * AgentBatchTask per backend, then queues and submits every batch.
 */
private void sendCreateReplicaTasks(List<CreateReplicaTask> tasks,
                                        MarkedCountDownLatch<Long, Long> countDownLatch) {
        HashMap<Long, AgentBatchTask> batchTaskMap = new HashMap<>();
        for (CreateReplicaTask task : tasks) {
            task.setLatch(countDownLatch);
            countDownLatch.addMark(task.getBackendId(), task.getTabletId());
            // computeIfAbsent replaces the manual get / null-check / put sequence.
            AgentBatchTask batchTask = batchTaskMap.computeIfAbsent(task.getBackendId(), k -> new AgentBatchTask());
            batchTask.addTask(task);
        }
        for (Map.Entry<Long, AgentBatchTask> entry : batchTaskMap.entrySet()) {
            AgentTaskQueue.addBatchTask(entry.getValue());
            AgentTaskExecutor.submit(entry.getValue());
        }
    }
/**
 * Blocks until every CREATE replica task tracked by {@code countDownLatch} has
 * finished, or until {@code timeout} seconds elapse.
 *
 * @throws DdlException if any task failed, the wait timed out, or the waiting
 *         thread was interrupted
 */
private void waitForFinished(MarkedCountDownLatch<Long, Long> countDownLatch, long timeout) throws DdlException {
        try {
            if (countDownLatch.await(timeout, TimeUnit.SECONDS)) {
                if (!countDownLatch.getStatus().ok()) {
                    String errMsg = "fail to create tablet: " + countDownLatch.getStatus().getErrorMsg();
                    LOG.warn(errMsg);
                    throw new DdlException(errMsg);
                }
            } else {
                // Timed out: list (up to) the first three unfinished replicas for diagnosis.
                List<Map.Entry<Long, Long>> unfinishedMarks = countDownLatch.getLeftMarks();
                List<Map.Entry<Long, Long>> firstThree =
                        unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3));
                StringBuilder sb = new StringBuilder("fail to create tablet: timed out. unfinished replicas");
                sb.append("(").append(firstThree.size()).append("/").append(unfinishedMarks.size()).append("): ");
                for (Map.Entry<Long, Long> mark : firstThree) {
                    sb.append(mark.getValue());
                    sb.append('(');
                    Backend backend = stateMgr.getClusterInfo().getBackend(mark.getKey());
                    sb.append(backend != null ? backend.getHost() : "N/A");
                    sb.append(") ");
                }
                sb.append(" timeout=").append(timeout).append("s");
                String errMsg = sb.toString();
                LOG.warn(errMsg);
                countDownLatch.countDownToZero(new Status(TStatusCode.TIMEOUT, "timed out"));
                throw new DdlException(errMsg);
            }
        } catch (InterruptedException e) {
            LOG.warn(e);
            countDownLatch.countDownToZero(new Status(TStatusCode.CANCELLED, "cancelled"));
            // The interrupt used to be swallowed here, letting callers return as if
            // tablet creation had succeeded. Restore the interrupt flag and surface
            // the failure instead.
            Thread.currentThread().interrupt();
            throw new DdlException("fail to create tablet: cancelled");
        }
    }
/*
 * Validates the column list of a table definition: it must be non-empty, contain
 * at least one key column, and every key column must precede every value column.
 */
private void validateColumns(List<Column> columns) throws DdlException {
        if (columns.isEmpty()) {
            ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_COLUMNS);
        }
        boolean seenValueColumn = false;
        boolean seenKeyColumn = false;
        for (Column column : columns) {
            if (!column.isKey()) {
                seenValueColumn = true;
                continue;
            }
            // A key column after any value column violates the required ordering.
            if (seenValueColumn) {
                ErrorReport.reportDdlException(ErrorCode.ERR_OLAP_KEY_MUST_BEFORE_VALUE);
            }
            seenKeyColumn = true;
        }
        if (!seenKeyColumn) {
            ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_KEYS);
        }
    }
// Injects the colocate-table index (used by tests / initialization).
public void setColocateTableIndex(ColocateTableIndex colocateTableIndex) {
        this.colocateTableIndex = colocateTableIndex;
    }
// Returns the colocate-table index tracking colocation group membership.
public ColocateTableIndex getColocateTableIndex() {
        return colocateTableIndex;
    }
/**
 * Creates an OLAP, external OLAP, or lake table from a CREATE TABLE statement:
 * assigns partition/index ids, analyzes table properties, materializes all
 * partitions and tablets, then registers the table in the database under the
 * global state lock. On failure all created tablets (and, for lake tables,
 * shards) are rolled back.
 *
 * @throws DdlException on any validation, property-analysis, or creation error
 */
private void createOlapOrLakeTable(Database db, CreateTableStmt stmt) throws DdlException {
        String tableName = stmt.getTableName();
        LOG.debug("begin create olap table: {}", tableName);
        List<Column> baseSchema = stmt.getColumns();
        validateColumns(baseSchema);
        // Phase 1: pre-allocate a partition id for every declared partition
        // (or a single implicit partition named after the table).
        PartitionDesc partitionDesc = stmt.getPartitionDesc();
        PartitionInfo partitionInfo;
        Map<String, Long> partitionNameToId = Maps.newHashMap();
        if (partitionDesc != null) {
            if (partitionDesc instanceof RangePartitionDesc) {
                RangePartitionDesc rangePartitionDesc = (RangePartitionDesc) partitionDesc;
                for (SingleRangePartitionDesc desc : rangePartitionDesc.getSingleRangePartitionDescs()) {
                    long partitionId = getNextId();
                    partitionNameToId.put(desc.getPartitionName(), partitionId);
                }
            } else if (partitionDesc instanceof ListPartitionDesc) {
                ListPartitionDesc listPartitionDesc = (ListPartitionDesc) partitionDesc;
                listPartitionDesc.findAllPartitionNames()
                        .forEach(partitionName -> partitionNameToId.put(partitionName, getNextId()));
            } else {
                throw new DdlException("Currently only support range or list partition with engine type olap");
            }
            partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId, false);
        } else {
            if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(stmt.getProperties())) {
                throw new DdlException("Only support dynamic partition properties on range partition table");
            }
            long partitionId = getNextId();
            partitionNameToId.put(tableName, partitionId);
            partitionInfo = new SinglePartitionInfo();
        }
        // Phase 2: keys / distribution metadata.
        KeysDesc keysDesc = stmt.getKeysDesc();
        Preconditions.checkNotNull(keysDesc);
        KeysType keysType = keysDesc.getKeysType();
        DistributionDesc distributionDesc = stmt.getDistributionDesc();
        Preconditions.checkNotNull(distributionDesc);
        DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
        short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, stmt.getProperties());
        LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount);
        TableIndexes indexes = new TableIndexes(stmt.getIndexes());
        Map<String, String> properties = stmt.getProperties();
        long tableId = GlobalStateMgr.getCurrentState().getNextId();
        // Phase 3: instantiate the right table flavor (external / lake / plain olap).
        OlapTable olapTable = null;
        if (stmt.isExternal()) {
            olapTable = new ExternalOlapTable(db.getId(), tableId, tableName, baseSchema, keysType, partitionInfo,
                    distributionInfo, indexes, properties);
        } else {
            if (stmt.isLakeEngine()) {
                olapTable = new LakeTable(tableId, tableName, baseSchema, keysType, partitionInfo,
                        distributionInfo, indexes);
                // Lake-only storage-cache properties; ttl != 0 is only meaningful
                // when the cache is enabled, and 0-with-cache falls back to the
                // configured cooldown default.
                boolean enableStorageCache = PropertyAnalyzer.analyzeBooleanProp(
                        properties, PropertyAnalyzer.PROPERTIES_ENABLE_STORAGE_CACHE, false);
                long storageCacheTtlS = 0;
                try {
                    storageCacheTtlS = PropertyAnalyzer.analyzeLongProp(
                            properties, PropertyAnalyzer.PROPERTIES_STORAGE_CACHE_TTL, 0);
                } catch (AnalysisException e) {
                    throw new DdlException(e.getMessage());
                }
                if (storageCacheTtlS < -1) {
                    throw new DdlException("Storage cache ttl should not be less than -1");
                }
                if (!enableStorageCache && storageCacheTtlS != 0) {
                    throw new DdlException("Storage cache ttl should be 0 when cache is disabled");
                }
                if (enableStorageCache && storageCacheTtlS == 0) {
                    storageCacheTtlS = Config.tablet_sched_storage_cooldown_second;
                }
                boolean allowAsyncWriteBack = PropertyAnalyzer.analyzeBooleanProp(
                        properties, PropertyAnalyzer.PROPERTIES_ALLOW_ASYNC_WRITE_BACK, false);
                if (!enableStorageCache && allowAsyncWriteBack) {
                    throw new DdlException("storage allow_async_write_back can't be enabled when cache is disabled");
                }
                ShardStorageInfo shardStorageInfo = stateMgr.getStarOSAgent().getServiceShardStorageInfo();
                ((LakeTable) olapTable)
                        .setStorageInfo(shardStorageInfo, enableStorageCache, storageCacheTtlS, allowAsyncWriteBack);
            } else {
                Preconditions.checkState(stmt.isOlapEngine());
                olapTable = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo,
                        distributionInfo, indexes);
            }
        }
        olapTable.setComment(stmt.getComment());
        long baseIndexId = getNextId();
        olapTable.setBaseIndexId(baseIndexId);
        // Phase 4: bloom-filter columns / fpp.
        Set<String> bfColumns = null;
        double bfFpp = 0;
        try {
            bfColumns = PropertyAnalyzer.analyzeBloomFilterColumns(properties, baseSchema,
                    olapTable.getKeysType() == KeysType.PRIMARY_KEYS);
            if (bfColumns != null && bfColumns.isEmpty()) {
                bfColumns = null;
            }
            bfFpp = PropertyAnalyzer.analyzeBloomFilterFpp(properties);
            if (bfColumns != null && bfFpp == 0) {
                bfFpp = FeConstants.default_bloom_filter_fpp;
            } else if (bfColumns == null) {
                bfFpp = 0;
            }
            olapTable.setBloomFilterInfo(bfColumns, bfFpp);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        // Phase 5: replication number (only persisted when explicitly set).
        short replicationNum = FeConstants.default_replication_num;
        try {
            boolean isReplicationNumSet =
                    properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM);
            replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum);
            if (isReplicationNumSet) {
                olapTable.setReplicationNum(replicationNum);
            }
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        boolean isInMemory =
                PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_INMEMORY, false);
        olapTable.setIsInMemory(isInMemory);
        boolean enablePersistentIndex =
                PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX,
                        false);
        olapTable.setEnablePersistentIndex(enablePersistentIndex);
        if (olapTable.getKeysType() == KeysType.PRIMARY_KEYS && olapTable.enablePersistentIndex()) {
            if (!olapTable.checkPersistentIndex()) {
                throw new DdlException("PrimaryKey table using persistent index don't support varchar(char) as key so far," +
                        " and key length should be no more than 64 Bytes");
            }
        }
        TTabletType tabletType = TTabletType.TABLET_TYPE_DISK;
        try {
            tabletType = PropertyAnalyzer.analyzeTabletType(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        // For unpartitioned tables the single implicit partition inherits the
        // table-level data property / replication / memory / tablet-type settings.
        if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
            long partitionId = partitionNameToId.get(tableName);
            DataProperty dataProperty = null;
            try {
                boolean hasMedium = false;
                if (properties != null) {
                    hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
                }
                dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, DataProperty.DEFAULT_DATA_PROPERTY);
                if (hasMedium) {
                    olapTable.setStorageMedium(dataProperty.getStorageMedium());
                }
            } catch (AnalysisException e) {
                throw new DdlException(e.getMessage());
            }
            Preconditions.checkNotNull(dataProperty);
            partitionInfo.setDataProperty(partitionId, dataProperty);
            partitionInfo.setReplicationNum(partitionId, replicationNum);
            partitionInfo.setIsInMemory(partitionId, isInMemory);
            partitionInfo.setTabletType(partitionId, tabletType);
            partitionInfo.setStorageInfo(partitionId, olapTable.getTableProperty().getStorageInfo());
        }
        // Phase 6: colocation group membership (schema must match the group's).
        try {
            String colocateGroup = PropertyAnalyzer.analyzeColocate(properties);
            if (!Strings.isNullOrEmpty(colocateGroup)) {
                String fullGroupName = db.getId() + "_" + colocateGroup;
                ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName);
                if (groupSchema != null) {
                    groupSchema.checkColocateSchema(olapTable);
                }
                colocateTableIndex.addTableToGroup(db.getId(), olapTable, colocateGroup,
                        null /* generate group id inside */);
                olapTable.setColocateGroup(colocateGroup);
            }
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        TStorageType baseIndexStorageType = null;
        try {
            baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        Preconditions.checkNotNull(baseIndexStorageType);
        int schemaVersion = 0;
        try {
            schemaVersion = PropertyAnalyzer.analyzeSchemaVersion(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        int schemaHash = Util.schemaHash(schemaVersion, baseSchema, bfColumns, bfFpp);
        olapTable.setIndexMeta(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash,
                shortKeyColumnCount, baseIndexStorageType, keysType);
        // Phase 7: rollup indexes declared inline in the CREATE TABLE statement.
        for (AlterClause alterClause : stmt.getRollupAlterClauseList()) {
            AddRollupClause addRollupClause = (AddRollupClause) alterClause;
            Long baseRollupIndex = olapTable.getIndexIdByName(tableName);
            TStorageType rollupIndexStorageType = null;
            try {
                rollupIndexStorageType = PropertyAnalyzer.analyzeStorageType(addRollupClause.getProperties());
            } catch (AnalysisException e) {
                throw new DdlException(e.getMessage());
            }
            Preconditions.checkNotNull(rollupIndexStorageType);
            List<Column> rollupColumns = stateMgr.getRollupHandler().checkAndPrepareMaterializedView(addRollupClause,
                    olapTable, baseRollupIndex, false);
            short rollupShortKeyColumnCount =
                    GlobalStateMgr.calcShortKeyColumnCount(rollupColumns, alterClause.getProperties());
            int rollupSchemaHash = Util.schemaHash(schemaVersion, rollupColumns, bfColumns, bfFpp);
            long rollupIndexId = getNextId();
            olapTable.setIndexMeta(rollupIndexId, addRollupClause.getRollupName(), rollupColumns, schemaVersion,
                    rollupSchemaHash, rollupShortKeyColumnCount, rollupIndexStorageType, keysType);
        }
        Long version = null;
        try {
            version = PropertyAnalyzer.analyzeVersionInfo(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        Preconditions.checkNotNull(version);
        TStorageFormat storageFormat = TStorageFormat.DEFAULT;
        try {
            storageFormat = PropertyAnalyzer.analyzeStorageFormat(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        olapTable.setStorageFormat(storageFormat);
        TCompressionType compressionType = TCompressionType.LZ4_FRAME;
        try {
            compressionType = PropertyAnalyzer.analyzeCompressionType(properties);
        } catch (AnalysisException e) {
            throw new DdlException(e.getMessage());
        }
        olapTable.setCompressionType(compressionType);
        // Phase 8: build partitions/tablets, then register the table. tabletIdSet
        // accumulates every created tablet id so the finally block can roll back.
        Set<Long> tabletIdSet = new HashSet<Long>();
        boolean createTblSuccess = false;
        boolean addToColocateGroupSuccess = false;
        try {
            if (olapTable.isOlapOrLakeTable()) {
                if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
                    long partitionId = partitionNameToId.get(tableName);
                    Partition partition = createPartition(db, olapTable, partitionId, tableName, version, tabletIdSet);
                    buildPartitions(db, olapTable, Collections.singletonList(partition));
                    olapTable.addPartition(partition);
                } else if (partitionInfo.getType() == PartitionType.RANGE
                        || partitionInfo.getType() == PartitionType.LIST) {
                    try {
                        // Dynamic-partition properties must be consumed here; any
                        // leftover property key is unknown and rejected below.
                        boolean hasMedium = false;
                        if (properties != null) {
                            hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
                        }
                        DataProperty dataProperty = PropertyAnalyzer.analyzeDataProperty(properties,
                                DataProperty.DEFAULT_DATA_PROPERTY);
                        DynamicPartitionUtil
                                .checkAndSetDynamicPartitionBuckets(properties, distributionDesc.getBuckets());
                        DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(olapTable, properties);
                        if (olapTable.dynamicPartitionExists() && olapTable.getColocateGroup() != null) {
                            HashDistributionInfo info = (HashDistributionInfo) distributionInfo;
                            if (info.getBucketNum() !=
                                    olapTable.getTableProperty().getDynamicPartitionProperty().getBuckets()) {
                                throw new DdlException("dynamic_partition.buckets should equal the distribution buckets"
                                        + " if creating a colocate table");
                            }
                        }
                        if (hasMedium) {
                            olapTable.setStorageMedium(dataProperty.getStorageMedium());
                        }
                        if (properties != null && !properties.isEmpty()) {
                            throw new DdlException("Unknown properties: " + properties);
                        }
                    } catch (AnalysisException e) {
                        throw new DdlException(e.getMessage());
                    }
                    List<Partition> partitions = new ArrayList<>(partitionNameToId.size());
                    for (Map.Entry<String, Long> entry : partitionNameToId.entrySet()) {
                        Partition partition = createPartition(db, olapTable, entry.getValue(), entry.getKey(), version,
                                tabletIdSet);
                        partitions.add(partition);
                    }
                    buildPartitions(db, olapTable, partitions);
                    for (Partition partition : partitions) {
                        olapTable.addPartition(partition);
                    }
                } else {
                    throw new DdlException("Unsupported partition method: " + partitionInfo.getType().name());
                }
            }
            // Register the table under the global state lock; the db may have been
            // dropped while tablets were being created.
            if (!tryLock(false)) {
                throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
            }
            try {
                if (getDb(db.getId()) == null) {
                    throw new DdlException("database has been dropped when creating table");
                }
                createTblSuccess = db.createTableWithLock(olapTable, false);
                if (!createTblSuccess) {
                    if (!stmt.isSetIfNotExists()) {
                        ErrorReport
                                .reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
                    } else {
                        LOG.info("Create table[{}] which already exists", tableName);
                        return;
                    }
                }
            } finally {
                unlock();
            }
            // Persist the colocation membership only after the table exists.
            if (colocateTableIndex.isColocateTable(tableId)) {
                ColocateTableIndex.GroupId groupId = colocateTableIndex.getGroup(tableId);
                List<List<Long>> backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
                ColocatePersistInfo info =
                        ColocatePersistInfo.createForAddTable(groupId, tableId, backendsPerBucketSeq);
                editLog.logColocateAddTable(info);
                addToColocateGroupSuccess = true;
            }
            LOG.info("Successfully create table[{};{}]", tableName, tableId);
            DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), olapTable);
            stateMgr.getDynamicPartitionScheduler().createOrUpdateRuntimeInfo(
                    tableName, DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime());
        } finally {
            // Rollback: remove any tablets (and lake shards) created for a table
            // that was never registered, and undo a half-done colocation add.
            if (!createTblSuccess) {
                for (Long tabletId : tabletIdSet) {
                    GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
                }
                if (olapTable.isLakeTable()) {
                    stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
                    editLog.logAddUnusedShard(tabletIdSet);
                }
            }
            if (colocateTableIndex.isColocateTable(tableId) && !addToColocateGroupSuccess) {
                colocateTableIndex.removeTable(tableId);
            }
        }
    }
/**
 * Creates a MySQL external table from a CREATE TABLE statement and registers it
 * in the database under the global state lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped,
 *         or the table already exists without IF NOT EXISTS
 */
private void createMysqlTable(Database db, CreateTableStmt stmt) throws DdlException {
        String tableName = stmt.getTableName();
        long tableId = GlobalStateMgr.getCurrentState().getNextId();
        MysqlTable mysqlTable = new MysqlTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
        mysqlTable.setComment(stmt.getComment());
        if (!tryLock(false)) {
            throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
        }
        try {
            // The database may have been dropped between analysis and now.
            if (getDb(db.getId()) == null) {
                throw new DdlException("database has been dropped when creating table");
            }
            boolean created = db.createTableWithLock(mysqlTable, false);
            if (!created) {
                if (stmt.isSetIfNotExists()) {
                    LOG.info("Create table[{}] which already exists", tableName);
                    return;
                }
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            }
        } finally {
            unlock();
        }
        LOG.info("Successfully create table[{}-{}]", tableName, tableId);
    }
/**
 * Creates an Elasticsearch external table from a CREATE TABLE statement and
 * registers it in the database under the global state lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped,
 *         or the table already exists without IF NOT EXISTS
 */
private void createEsTable(Database db, CreateTableStmt stmt) throws DdlException {
        String tableName = stmt.getTableName();
        List<Column> baseSchema = stmt.getColumns();
        validateColumns(baseSchema);
        // Partition bookkeeping: use the declared partition desc when present,
        // otherwise a single implicit partition named after the table.
        Map<String, Long> partitionNameToId = Maps.newHashMap();
        PartitionDesc partitionDesc = stmt.getPartitionDesc();
        PartitionInfo partitionInfo;
        if (partitionDesc == null) {
            partitionNameToId.put(tableName, getNextId());
            partitionInfo = new SinglePartitionInfo();
        } else {
            partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId, false);
        }
        long tableId = GlobalStateMgr.getCurrentState().getNextId();
        EsTable esTable = new EsTable(tableId, tableName, baseSchema, stmt.getProperties(), partitionInfo);
        esTable.setComment(stmt.getComment());
        if (!tryLock(false)) {
            throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
        }
        try {
            // The database may have been dropped between analysis and now.
            if (getDb(db.getId()) == null) {
                throw new DdlException("database has been dropped when creating table");
            }
            boolean created = db.createTableWithLock(esTable, false);
            if (!created) {
                if (stmt.isSetIfNotExists()) {
                    LOG.info("create table[{}] which already exists", tableName);
                    return;
                }
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            }
        } finally {
            unlock();
        }
        LOG.info("successfully create table{} with id {}", tableName, tableId);
    }
/**
 * Creates a Hive external table from a CREATE TABLE statement and registers it
 * in the database under the global state lock. When no comment is given, the
 * comment defaults to a description of the table's partition columns.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped,
 *         or the table already exists without IF NOT EXISTS
 */
private void createHiveTable(Database db, CreateTableStmt stmt) throws DdlException {
        String tableName = stmt.getTableName();
        long tableId = getNextId();
        HiveTable hiveTable = new HiveTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
        if (Strings.isNullOrEmpty(stmt.getComment())) {
            // Default comment lists the partition columns.
            hiveTable.setComment("PARTITION BY (" + String.join(", ", hiveTable.getPartitionColumnNames()) + ")");
        } else {
            hiveTable.setComment(stmt.getComment());
        }
        if (!tryLock(false)) {
            throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
        }
        try {
            // The database may have been dropped between analysis and now.
            if (getDb(db.getId()) == null) {
                throw new DdlException("database has been dropped when creating table");
            }
            boolean created = db.createTableWithLock(hiveTable, false);
            if (!created) {
                if (stmt.isSetIfNotExists()) {
                    LOG.info("create table[{}] which already exists", tableName);
                    return;
                }
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            }
        } finally {
            unlock();
        }
        LOG.info("successfully create table[{}-{}]", tableName, tableId);
    }
/**
 * Creates an Iceberg external table from a CREATE TABLE statement and registers
 * it in the database under the global state lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped,
 *         or the table already exists without IF NOT EXISTS
 */
private void createIcebergTable(Database db, CreateTableStmt stmt) throws DdlException {
        String tableName = stmt.getTableName();
        long tableId = getNextId();
        IcebergTable icebergTable = new IcebergTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
        String comment = stmt.getComment();
        if (!Strings.isNullOrEmpty(comment)) {
            icebergTable.setComment(comment);
        }
        if (!tryLock(false)) {
            throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
        }
        try {
            // The database may have been dropped between analysis and now.
            if (getDb(db.getId()) == null) {
                throw new DdlException("database has been dropped when creating table");
            }
            boolean created = db.createTableWithLock(icebergTable, false);
            if (!created) {
                if (stmt.isSetIfNotExists()) {
                    LOG.info("create table[{}] which already exists", tableName);
                    return;
                }
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            }
        } finally {
            unlock();
        }
        LOG.info("successfully create table[{}-{}]", tableName, tableId);
    }
// Creates a Hudi external table in the given database from the CREATE TABLE statement.
// Hudi's five bookkeeping meta columns are appended (as nullable STRING) when the user
// did not declare them, so the table schema always matches the underlying Hudi layout.
// Throws DdlException if the globalStateMgr lock cannot be taken, the database was dropped
// concurrently, or the table already exists (unless IF NOT EXISTS was specified).
private void createHudiTable(Database db, CreateTableStmt stmt) throws DdlException {
String tableName = stmt.getTableName();
List<Column> columns = stmt.getColumns();
// The Hudi-managed meta columns that must be present on every Hudi table.
Set<String> metaFields = new HashSet<>(Arrays.asList(
HoodieRecord.COMMIT_TIME_METADATA_FIELD,
HoodieRecord.COMMIT_SEQNO_METADATA_FIELD,
HoodieRecord.RECORD_KEY_METADATA_FIELD,
HoodieRecord.PARTITION_PATH_METADATA_FIELD,
HoodieRecord.FILENAME_METADATA_FIELD));
// Keep only the meta columns the user did NOT declare, then add those to the schema.
Set<String> includedMetaFields = columns.stream().map(Column::getName)
.filter(metaFields::contains).collect(Collectors.toSet());
metaFields.removeAll(includedMetaFields);
metaFields.forEach(f -> columns.add(new Column(f, Type.STRING, true)));
long tableId = getNextId();
HudiTable hudiTable = new HudiTable(tableId, tableName, columns, stmt.getProperties());
// Default comment documents the partitioning scheme when the user supplied none.
String partitionCmt = "PARTITION BY (" + String.join(", ", hudiTable.getPartitionColumnNames()) + ")";
if (Strings.isNullOrEmpty(stmt.getComment())) {
hudiTable.setComment(partitionCmt);
} else {
hudiTable.setComment(stmt.getComment());
}
// Global lock guards against a concurrent DROP DATABASE during registration.
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
if (getDb(db.getFullName()) == null) {
throw new DdlException("Database has been dropped when creating table");
}
if (!db.createTableWithLock(hudiTable, false)) {
if (!stmt.isSetIfNotExists()) {
ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
} else {
// IF NOT EXISTS: an existing table of the same name is not an error.
LOG.info("Create table[{}] which already exists", tableName);
return;
}
}
} finally {
unlock();
}
LOG.info("Successfully create table[{}-{}]", tableName, tableId);
}
// Creates a JDBC external table in the given database from the CREATE TABLE statement.
// Throws DdlException if the globalStateMgr lock cannot be taken, the database was dropped
// concurrently, or the table already exists (unless IF NOT EXISTS was specified).
private void createJDBCTable(Database db, CreateTableStmt stmt) throws DdlException {
    String name = stmt.getTableName();
    long id = getNextId();
    JDBCTable jdbcTable = new JDBCTable(id, name, stmt.getColumns(), stmt.getProperties());
    // Hold the global lock so the database cannot be dropped between the
    // existence check and the table registration.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (getDb(db.getFullName()) == null) {
            throw new DdlException("database has been dropped when creating table");
        }
        boolean created = db.createTableWithLock(jdbcTable, false);
        if (!created) {
            if (stmt.isSetIfNotExists()) {
                // IF NOT EXISTS: an existing table of the same name is not an error.
                LOG.info("create table [{}] which already exists", name);
                return;
            }
            ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, name, "table already exists");
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create jdbc table[{}-{}]", name, id);
}
// Replays a CREATE TABLE edit-log entry: registers the table in its database and,
// when not running on the checkpoint thread, rebuilds the tablet inverted index
// for OLAP/lake tables.
public void replayCreateTable(String dbName, Table table) {
Database db = this.fullNameToDb.get(dbName);
db.createTableWithLock(table, true);
// The checkpoint thread only serializes metadata; it must not mutate the
// process-wide inverted index.
if (!isCheckpointThread()) {
if (table.isOlapOrLakeTable()) {
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
OlapTable olapTable = (OlapTable) table;
long dbId = db.getId();
long tableId = table.getId();
// Re-register every tablet of every materialized index in every partition.
for (Partition partition : olapTable.getAllPartitions()) {
long partitionId = partition.getId();
TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
partitionId).getStorageMedium();
for (MaterializedIndex mIndex : partition
.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
long indexId = mIndex.getId();
int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, schemaHash, medium,
table.isLakeTable());
for (Tablet tablet : mIndex.getTablets()) {
long tabletId = tablet.getId();
invertedIndex.addTablet(tabletId, tabletMeta);
// Only local OLAP tables carry replicas; lake tablets are indexed
// without replica entries.
if (table.isOlapTable()) {
for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
invertedIndex.addReplica(tabletId, replica);
}
}
}
}
}
DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(dbId, olapTable);
}
}
}
// Replays a CREATE MATERIALIZED VIEW edit-log entry: registers the MV in its database
// and, when not on the checkpoint thread, rebuilds the tablet inverted index for it.
public void replayCreateMaterializedView(String dbName, MaterializedView materializedView) {
Database db = this.fullNameToDb.get(dbName);
db.createMaterializedWithLock(materializedView, true);
// The checkpoint thread must not mutate the process-wide inverted index.
if (!isCheckpointThread()) {
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
long dbId = db.getId();
long mvId = materializedView.getId();
// Re-register every tablet (and local replicas) of the MV's indexes.
for (Partition partition : materializedView.getAllPartitions()) {
long partitionId = partition.getId();
TStorageMedium medium = materializedView.getPartitionInfo().getDataProperty(
partitionId).getStorageMedium();
for (MaterializedIndex mIndex : partition.getMaterializedIndices(
MaterializedIndex.IndexExtState.ALL)) {
long indexId = mIndex.getId();
int schemaHash = materializedView.getSchemaHashByIndexId(indexId);
TabletMeta tabletMeta = new TabletMeta(dbId, mvId, partitionId, indexId, schemaHash, medium);
for (Tablet tablet : mIndex.getTablets()) {
long tabletId = tablet.getId();
invertedIndex.addTablet(tabletId, tabletMeta);
// Only local tablets have replicas to index.
if (tablet instanceof LocalTablet) {
for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
invertedIndex.addReplica(tabletId, replica);
}
}
}
}
}
}
}
// Creates the tablets of one materialized index of a lake-table partition by allocating
// shards from the StarOS agent; each shard backs one LakeTablet. The per-partition
// storage-cache settings are folded into the shard storage info. Tablet ids are
// collected into tabletIdSet for rollback bookkeeping by the caller.
// Throws DdlException for a non-hash distribution or if shard creation fails.
private void createLakeTablets(LakeTable table, long partitionId, MaterializedIndex index,
DistributionInfo distributionInfo, short replicationNum, TabletMeta tabletMeta,
Set<Long> tabletIdSet)
throws DdlException {
Preconditions.checkArgument(replicationNum > 0);
DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
// Only hash distribution is supported for lake tables.
if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH) {
throw new DdlException("Unknown distribution type: " + distributionInfoType);
}
PartitionInfo partitionInfo = table.getPartitionInfo();
StorageInfo partitionStorageInfo = partitionInfo.getStorageInfo(partitionId);
// Propagate the partition's cache policy (enable/TTL/async write-back) to the shards.
CacheInfo cacheInfo = CacheInfo.newBuilder().setEnableCache(partitionStorageInfo.isEnableStorageCache())
.setTtlSeconds(partitionStorageInfo.getStorageCacheTtlS())
.setAllowAsyncWriteBack(partitionStorageInfo.isAllowAsyncWriteBack())
.build();
ShardStorageInfo shardStorageInfo = ShardStorageInfo.newBuilder(table.getShardStorageInfo())
.setCacheInfo(cacheInfo).build();
// One shard per bucket; the shard id doubles as the tablet id.
int bucketNum = distributionInfo.getBucketNum();
List<Long> shardIds = stateMgr.getStarOSAgent().createShards(bucketNum, shardStorageInfo);
for (long shardId : shardIds) {
Tablet tablet = new LakeTablet(shardId);
index.addTablet(tablet, tabletMeta);
tabletIdSet.add(tablet.getId());
}
}
// Creates the local tablets (and their replicas) of one materialized index.
// For colocate tables the backend assignment per bucket is reused from the colocation
// group when one already exists; otherwise backends are chosen arbitrarily and, for a
// colocate group, the newly chosen sequence is persisted to the edit log so later
// tables in the group align with it.
// Throws DdlException for a non-hash distribution or when no suitable backends exist.
private void createOlapTablets(MaterializedIndex index, Replica.ReplicaState replicaState,
DistributionInfo distributionInfo, long version, short replicationNum,
TabletMeta tabletMeta, Set<Long> tabletIdSet) throws DdlException {
Preconditions.checkArgument(replicationNum > 0);
DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH) {
throw new DdlException("Unknown distribution type: " + distributionInfoType);
}
// Existing bucket->backends mapping of the colocation group, if this table is colocated.
List<List<Long>> backendsPerBucketSeq = null;
ColocateTableIndex.GroupId groupId = null;
if (colocateTableIndex.isColocateTable(tabletMeta.getTableId())) {
Database db = getDb(tabletMeta.getDbId());
groupId = colocateTableIndex.getGroup(tabletMeta.getTableId());
// NOTE(review): the db write lock here appears to serialize reads of the group's
// bucket sequence against concurrent colocate updates — confirm intended scope.
db.writeLock();
try {
backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
} finally {
db.writeUnlock();
}
}
// An empty mapping means this is the first table of the group (or not colocated):
// choose backends freshly and record the choice as we go.
boolean chooseBackendsArbitrary = backendsPerBucketSeq == null || backendsPerBucketSeq.isEmpty();
if (chooseBackendsArbitrary) {
backendsPerBucketSeq = Lists.newArrayList();
}
for (int i = 0; i < distributionInfo.getBucketNum(); ++i) {
LocalTablet tablet = new LocalTablet(getNextId());
index.addTablet(tablet, tabletMeta);
tabletIdSet.add(tablet.getId());
List<Long> chosenBackendIds;
if (chooseBackendsArbitrary) {
// Strict medium check restricts candidates to backends with the partition's medium.
if (Config.enable_strict_storage_medium_check) {
chosenBackendIds =
chosenBackendIdBySeq(replicationNum, tabletMeta.getStorageMedium());
} else {
chosenBackendIds = chosenBackendIdBySeq(replicationNum);
}
backendsPerBucketSeq.add(chosenBackendIds);
} else {
// Reuse the colocation group's assignment for bucket i.
chosenBackendIds = backendsPerBucketSeq.get(i);
}
// One replica per chosen backend.
for (long backendId : chosenBackendIds) {
long replicaId = getNextId();
Replica replica = new Replica(replicaId, backendId, replicaState, version,
tabletMeta.getOldSchemaHash());
tablet.addReplica(replica);
}
Preconditions.checkState(chosenBackendIds.size() == replicationNum,
chosenBackendIds.size() + " vs. " + replicationNum);
}
// Persist the freshly chosen bucket sequence for the colocation group.
if (groupId != null && chooseBackendsArbitrary) {
colocateTableIndex.addBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
ColocatePersistInfo info =
ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
editLog.logColocateBackendsPerBucketSeq(info);
}
}
// Chooses {replicationNum} backends restricted to the given storage medium.
// Throws DdlException with remediation advice when not enough hosts qualify.
private List<Long> chosenBackendIdBySeq(int replicationNum, TStorageMedium storageMedium)
        throws DdlException {
    List<Long> backendIds = systemInfoService.seqChooseBackendIdsByStorageMedium(replicationNum,
            true, true, storageMedium);
    if (!CollectionUtils.isEmpty(backendIds)) {
        return backendIds;
    }
    throw new DdlException(
            "Failed to find enough hosts with storage medium " + storageMedium +
            " at all backends, number of replicas needed: " +
            replicationNum + ". Storage medium check failure can be forcefully ignored by executing " +
            "'ADMIN SET FRONTEND CONFIG (\"enable_strict_storage_medium_check\" = \"false\");', " +
            "but incompatible medium type can cause balance problem, so we strongly recommend" +
            " creating table with compatible 'storage_medium' property set.");
}
// Chooses {replicationNum} backends without a storage-medium restriction.
// Throws DdlException listing the currently alive backends when not enough qualify.
private List<Long> chosenBackendIdBySeq(int replicationNum) throws DdlException {
    List<Long> backendIds = systemInfoService.seqChooseBackendIds(replicationNum, true, true);
    if (!CollectionUtils.isEmpty(backendIds)) {
        return backendIds;
    }
    List<Long> aliveBackendIds = systemInfoService.getBackendIds(true);
    throw new DdlException("Failed to find enough host in all backends. need: " + replicationNum +
            ", Current alive backend is [" + Joiner.on(",").join(aliveBackendIds) + "]");
}
// Drops a table as requested by a DROP TABLE statement.
// Reports ERR_BAD_DB_ERROR when the database does not exist; IF EXISTS and FORCE
// semantics are delegated to Database.dropTable.
public void dropTable(DropTableStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    Database database = getDb(dbName);
    if (database == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    database.dropTable(stmt.getTableName(), stmt.isSetIfExists(), stmt.isForceDrop());
}
// Submits drop-tablet batch tasks to their backends. A batch larger than
// Config.max_agent_tasks_send_per_be is split into chunks of at most that many
// tasks, with a pause between chunks to avoid overwhelming a single backend.
public void sendDropTabletTasks(HashMap<Long, AgentBatchTask> batchTaskMap) {
    int numDropTaskPerBe = Config.max_agent_tasks_send_per_be;
    for (Map.Entry<Long, AgentBatchTask> entry : batchTaskMap.entrySet()) {
        AgentBatchTask originTasks = entry.getValue();
        if (originTasks.getTaskNum() > numDropTaskPerBe) {
            AgentBatchTask partTask = new AgentBatchTask();
            List<AgentTask> allTasks = originTasks.getAllTasks();
            int curTask = 0;
            for (AgentTask task : allTasks) {
                partTask.addTask(task);
                // BUGFIX: previously `curTask++ > numDropTaskPerBe` with curTask starting
                // at 1, which let every chunk grow to numDropTaskPerBe + 1 tasks. Now a
                // chunk is flushed as soon as it holds exactly numDropTaskPerBe tasks.
                if (++curTask >= numDropTaskPerBe) {
                    AgentTaskExecutor.submit(partTask);
                    curTask = 0;
                    partTask = new AgentBatchTask();
                    // Throttle between chunks so the backend is not flooded.
                    ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
                }
            }
            // Flush the trailing partial chunk, if any.
            if (partTask.getAllTasks().size() > 0) {
                AgentTaskExecutor.submit(partTask);
            }
        } else {
            AgentTaskExecutor.submit(originTasks);
        }
    }
}
// Replays a DROP TABLE edit-log entry under the database write lock.
// unprotectDropTable may hand back deferred cleanup work, which is run only
// after the lock has been released.
public void replayDropTable(Database db, long tableId, boolean isForceDrop) {
    Runnable deferredWork;
    db.writeLock();
    try {
        deferredWork = db.unprotectDropTable(tableId, isForceDrop, true);
    } finally {
        db.writeUnlock();
    }
    // Execute any deferred cleanup outside the lock.
    if (deferredWork != null) {
        deferredWork.run();
    }
}
// Replays an ERASE TABLE edit-log entry by delegating to the recycle bin.
public void replayEraseTable(long tableId) {
recycleBin.replayEraseTable(tableId);
}
// Replays a multi-table ERASE edit-log entry: erases each listed table from the recycle bin.
public void replayEraseMultiTables(MultiEraseTableInfo multiEraseTableInfo) {
    multiEraseTableInfo.getTableIds().forEach(recycleBin::replayEraseTable);
}
// Replays a RECOVER TABLE edit-log entry: restores the table from the recycle bin
// into its database under the database write lock.
public void replayRecoverTable(RecoverInfo info) {
    Database database = getDb(info.getDbId());
    database.writeLock();
    try {
        recycleBin.replayRecoverTable(database, info.getTableId());
    } finally {
        database.writeUnlock();
    }
}
// Adds a replica described by an edit-log entry to its tablet.
// Caller must hold the database write lock ("unprotect" = no locking here).
// Recycle-bin databases/tables are included so replay also works for dropped objects.
private void unprotectAddReplica(ReplicaPersistInfo info) {
LOG.debug("replay add a replica {}", info);
Database db = getDbIncludeRecycleBin(info.getDbId());
OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId());
Partition partition = getPartitionIncludeRecycleBin(olapTable, info.getPartitionId());
MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId());
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId());
// A schema hash of -1 in the log means "use the index's current schema hash".
int schemaHash = info.getSchemaHash();
if (schemaHash == -1) {
schemaHash = olapTable.getSchemaHashByIndexId(info.getIndexId());
}
Replica replica = new Replica(info.getReplicaId(), info.getBackendId(), info.getVersion(),
schemaHash, info.getDataSize(), info.getRowCount(),
Replica.ReplicaState.NORMAL,
info.getLastFailedVersion(),
info.getLastSuccessVersion());
tablet.addReplica(replica);
}
// Updates an existing replica (version/size/row count) from an edit-log entry and
// clears its "bad" flag. Caller must hold the database write lock.
// Recycle-bin databases/tables are included so replay also works for dropped objects.
private void unprotectUpdateReplica(ReplicaPersistInfo info) {
LOG.debug("replay update a replica {}", info);
Database db = getDbIncludeRecycleBin(info.getDbId());
OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId());
Partition partition = getPartitionIncludeRecycleBin(olapTable, info.getPartitionId());
MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId());
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId());
Replica replica = tablet.getReplicaByBackendId(info.getBackendId());
// The replica must already exist; a miss indicates corrupted/mismatched metadata.
Preconditions.checkNotNull(replica, info);
replica.updateRowCount(info.getVersion(), info.getDataSize(), info.getRowCount());
replica.setBad(false);
}
// Replays an ADD REPLICA edit-log entry, taking the database write lock around
// the unprotected mutation.
public void replayAddReplica(ReplicaPersistInfo info) {
    Database database = getDbIncludeRecycleBin(info.getDbId());
    database.writeLock();
    try {
        unprotectAddReplica(info);
    } finally {
        database.writeUnlock();
    }
}
// Replays an UPDATE REPLICA edit-log entry, taking the database write lock around
// the unprotected mutation.
public void replayUpdateReplica(ReplicaPersistInfo info) {
    Database database = getDbIncludeRecycleBin(info.getDbId());
    database.writeLock();
    try {
        unprotectUpdateReplica(info);
    } finally {
        database.writeUnlock();
    }
}
// Removes the replica on the given backend from its tablet, as described by an
// edit-log entry. Caller must hold the database write lock ("unprotect").
// Recycle-bin databases/tables are included so replay also works for dropped objects.
public void unprotectDeleteReplica(ReplicaPersistInfo info) {
    Database database = getDbIncludeRecycleBin(info.getDbId());
    OlapTable table = (OlapTable) getTableIncludeRecycleBin(database, info.getTableId());
    Partition partition = getPartitionIncludeRecycleBin(table, info.getPartitionId());
    MaterializedIndex index = partition.getIndex(info.getIndexId());
    LocalTablet localTablet = (LocalTablet) index.getTablet(info.getTabletId());
    localTablet.deleteReplicaByBackendId(info.getBackendId());
}
// Replays a DELETE REPLICA edit-log entry, taking the database write lock around
// the unprotected mutation.
public void replayDeleteReplica(ReplicaPersistInfo info) {
    Database database = getDbIncludeRecycleBin(info.getDbId());
    database.writeLock();
    try {
        unprotectDeleteReplica(info);
    } finally {
        database.writeUnlock();
    }
}
// Looks up a table by database name and table name.
// Returns null when either the database or the table does not exist.
@Override
public Table getTable(String dbName, String tblName) {
    Database database = getDb(dbName);
    return database == null ? null : database.getTable(tblName);
}
// Looks up a database by (full) name; returns null if absent or name is null.
// The information_schema database is registered under its lower-case short name,
// so a case-insensitive fallback is applied for it.
@Override
public Database getDb(String name) {
    if (name == null) {
        return null;
    }
    Database database = fullNameToDb.get(name);
    if (database != null) {
        return database;
    }
    String shortName = ClusterNamespace.getNameFromFullName(name);
    if (shortName.equalsIgnoreCase(InfoSchemaDb.DATABASE_NAME)) {
        return fullNameToDb.get(shortName.toLowerCase());
    }
    return null;
}
// Looks up a database by id; returns null if absent.
@Override
public Database getDb(long dbId) {
return idToDb.get(dbId);
}
// Exposes the live full-name -> Database map (not a copy; callers must not
// assume a stable snapshot).
public ConcurrentHashMap<String, Database> getFullNameToDb() {
return fullNameToDb;
}
// Looks up a database by id, also searching the recycle bin for dropped databases.
public Database getDbIncludeRecycleBin(long dbId) {
    Database database = idToDb.get(dbId);
    return database != null ? database : recycleBin.getDatabase(dbId);
}
// Looks up a table by id in the database, also searching the recycle bin for
// dropped tables.
public Table getTableIncludeRecycleBin(Database db, long tableId) {
    Table found = db.getTable(tableId);
    return found != null ? found : recycleBin.getTable(db.getId(), tableId);
}
// Returns the database's tables plus any of its tables sitting in the recycle bin.
public List<Table> getTablesIncludeRecycleBin(Database db) {
    List<Table> allTables = db.getTables();
    allTables.addAll(recycleBin.getTables(db.getId()));
    return allTables;
}
// Looks up a partition by id on the table, also searching the recycle bin for
// dropped partitions.
public Partition getPartitionIncludeRecycleBin(OlapTable table, long partitionId) {
    Partition found = table.getPartition(partitionId);
    return found != null ? found : recycleBin.getPartition(partitionId);
}
// Returns the table's (formal) partitions plus its partitions in the recycle bin,
// as a freshly allocated collection.
public Collection<Partition> getPartitionsIncludeRecycleBin(OlapTable table) {
    Collection<Partition> result = new ArrayList<>(table.getPartitions());
    result.addAll(recycleBin.getPartitions(table.getId()));
    return result;
}
// Returns ALL of the table's partitions (including temporary ones, per
// getAllPartitions) plus its partitions in the recycle bin.
public Collection<Partition> getAllPartitionsIncludeRecycleBin(OlapTable table) {
    Collection<Partition> result = table.getAllPartitions();
    result.addAll(recycleBin.getPartitions(table.getId()));
    return result;
}
// Fetches a partition's data property, falling back to the recycle bin when the
// partition is no longer tracked by this PartitionInfo.
public DataProperty getDataPropertyIncludeRecycleBin(PartitionInfo info, long partitionId) {
    DataProperty property = info.getDataProperty(partitionId);
    return property != null ? property : recycleBin.getPartitionDataProperty(partitionId);
}
// Fetches a partition's replication number; a sentinel of -1 from PartitionInfo
// means the partition is not tracked there, so the recycle bin is consulted.
public short getReplicationNumIncludeRecycleBin(PartitionInfo info, long partitionId) {
    short replicationNum = info.getReplicationNum(partitionId);
    if (replicationNum == (short) -1) {
        return recycleBin.getPartitionReplicationNum(partitionId);
    }
    return replicationNum;
}
// Returns a snapshot list of all database full names.
@Override
public List<String> listDbNames() {
return Lists.newArrayList(fullNameToDb.keySet());
}
// Returns the names of all tables in the given database.
// Throws DdlException when the database does not exist.
@Override
public List<String> listTableNames(String dbName) throws DdlException {
    Database database = getDb(dbName);
    if (database == null) {
        throw new DdlException("Database " + dbName + " doesn't exist");
    }
    List<String> names = new ArrayList<>();
    for (Table table : database.getTables()) {
        names.add(table.getName());
    }
    return names;
}
// Returns a snapshot list of all database ids.
@Override
public List<Long> getDbIds() {
return Lists.newArrayList(idToDb.keySet());
}
// Returns the ids of all live databases plus those currently in the recycle bin.
public List<Long> getDbIdsIncludeRecycleBin() {
    List<Long> ids = getDbIds();
    ids.addAll(recycleBin.getAllDbIds());
    return ids;
}
// Builds the partitionId -> storage medium map reported to backends, and as a side
// effect demotes SSD partitions whose cooldown time has passed to HDD (persisting
// each change to the edit log). Two passes: a read-locked scan that classifies
// partitions, then a write-locked pass that re-checks and applies the demotions.
public HashMap<Long, TStorageMedium> getPartitionIdToStorageMediumMap() {
    HashMap<Long, TStorageMedium> storageMediumMap = new HashMap<>();
    // Partitions queued for SSD->HDD demotion, grouped as dbId -> (tableId -> partitionIds).
    HashMap<Long, Multimap<Long, Long>> changedPartitionsMap = new HashMap<>();
    long currentTimeMs = System.currentTimeMillis();
    List<Long> dbIds = getDbIds();
    // Pass 1 (read lock): classify every OLAP partition.
    for (long dbId : dbIds) {
        Database db = getDb(dbId);
        if (db == null) {
            LOG.warn("db {} does not exist while doing backend report", dbId);
            continue;
        }
        db.readLock();
        try {
            for (Table table : db.getTables()) {
                if (table.getType() != Table.TableType.OLAP) {
                    continue;
                }
                long tableId = table.getId();
                OlapTable olapTable = (OlapTable) table;
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                for (Partition partition : olapTable.getAllPartitions()) {
                    long partitionId = partition.getId();
                    DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
                    Preconditions.checkNotNull(dataProperty,
                            partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId);
                    // Queue SSD partitions whose cooldown has expired for demotion;
                    // tables under alteration or with primary keys are left alone.
                    if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                            && dataProperty.getCooldownTimeMs() < currentTimeMs
                            && olapTable.getState() == OlapTable.OlapTableState.NORMAL
                            && olapTable.getKeysType() != KeysType.PRIMARY_KEYS) {
                        Multimap<Long, Long> multimap = changedPartitionsMap.get(dbId);
                        if (multimap == null) {
                            multimap = HashMultimap.create();
                            changedPartitionsMap.put(dbId, multimap);
                        }
                        multimap.put(tableId, partitionId);
                    } else {
                        storageMediumMap.put(partitionId, dataProperty.getStorageMedium());
                    }
                }
            }
        } finally {
            db.readUnlock();
        }
    }
    // Pass 2 (write lock): re-check the queued partitions and demote them to HDD,
    // recording each change in the edit log.
    for (Long dbId : changedPartitionsMap.keySet()) {
        Database db = getDb(dbId);
        if (db == null) {
            LOG.warn("db {} does not exist while checking backend storage medium", dbId);
            continue;
        }
        Multimap<Long, Long> tableIdToPartitionIds = changedPartitionsMap.get(dbId);
        // Use a bounded tryLock so a stuck database cannot stall the whole report.
        if (!db.tryWriteLock(Database.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
            // BUGFIX: log message typo "hecking" corrected to "checking".
            LOG.warn("try get db {} writelock but failed when checking backend storage medium", dbId);
            continue;
        }
        Preconditions.checkState(db.isWriteLockHeldByCurrentThread());
        try {
            for (Long tableId : tableIdToPartitionIds.keySet()) {
                Table table = db.getTable(tableId);
                if (table == null) {
                    continue;
                }
                OlapTable olapTable = (OlapTable) table;
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                Collection<Long> partitionIds = tableIdToPartitionIds.get(tableId);
                for (Long partitionId : partitionIds) {
                    Partition partition = olapTable.getPartition(partitionId);
                    if (partition == null) {
                        continue;
                    }
                    DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
                    // Re-check under the write lock: the property may have changed since pass 1.
                    if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                            && dataProperty.getCooldownTimeMs() < currentTimeMs) {
                        DataProperty hdd = new DataProperty(TStorageMedium.HDD);
                        partitionInfo.setDataProperty(partition.getId(), hdd);
                        storageMediumMap.put(partitionId, TStorageMedium.HDD);
                        LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD",
                                dbId, tableId, partitionId);
                        // Persist the demotion so replicas/replay agree on the new medium.
                        ModifyPartitionInfo info =
                                new ModifyPartitionInfo(db.getId(), olapTable.getId(),
                                        partition.getId(),
                                        hdd,
                                        (short) -1,
                                        partitionInfo.getIsInMemory(partition.getId()));
                        editLog.logModifyPartition(info);
                    }
                }
            }
        } finally {
            db.writeUnlock();
        }
    }
    return storageMediumMap;
}
/*
 * Handles AlterTableStmt (the ALTER TABLE command) by delegating to the alter
 * subsystem, which dispatches to SchemaChangeHandler or RollupHandler as needed.
 */
@Override
public void alterTable(AlterTableStmt stmt) throws UserException {
stateMgr.getAlterInstance().processAlterTable(stmt);
}
/**
 * Handles AlterViewStmt (the ALTER VIEW command) by delegating to the alter
 * subsystem with the current connection context.
 */
@Override
public void alterView(AlterViewStmt stmt) throws UserException {
stateMgr.getAlterInstance().processAlterView(stmt, ConnectContext.get());
}
// Handles the legacy single-table CREATE MATERIALIZED VIEW statement by
// delegating to the alter subsystem (rollup-style sync MV).
@Override
public void createMaterializedView(CreateMaterializedViewStmt stmt)
throws AnalysisException, DdlException {
stateMgr.getAlterInstance().processCreateMaterializedView(stmt);
}
// Creates a multi-table (async/manual/sync) materialized view:
//  1. pre-checks name collision under the db read lock,
//  2. builds schema, partition/distribution info and the refresh scheme,
//  3. analyzes properties (replication num, storage medium),
//  4. for unpartitioned MVs, creates the single partition and its tablets,
//  5. registers the MV under the global lock, and
//  6. on success creates the background refresh task.
// Note: the existence pre-check (step 1) is advisory only; the authoritative
// check is createMaterializedWithLock in step 5.
@Override
public void createMaterializedView(CreateMaterializedViewStatement stmt)
throws DdlException {
String mvName = stmt.getTableName().getTbl();
String dbName = stmt.getTableName().getDb();
LOG.debug("Begin create materialized view: {}", mvName);
Database db = this.getDb(dbName);
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
}
// Early name-collision check (fast fail); may race with concurrent creates.
db.readLock();
try {
if (db.getTable(mvName) != null) {
if (stmt.isIfNotExists()) {
LOG.info("Create materialized view [{}] which already exists", mvName);
return;
} else {
ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, mvName);
}
}
} finally {
db.readUnlock();
}
// Schema and partition/distribution metadata.
List<Column> baseSchema = stmt.getMvColumnItems();
validateColumns(baseSchema);
PartitionDesc partitionDesc = stmt.getPartitionExpDesc();
PartitionInfo partitionInfo;
if (partitionDesc != null) {
partitionInfo = partitionDesc.toPartitionInfo(
Arrays.asList(stmt.getPartitionColumn()),
Maps.newHashMap(), false);
} else {
// No partition expression: single unpartitioned MV.
partitionInfo = new SinglePartitionInfo();
}
DistributionDesc distributionDesc = stmt.getDistributionDesc();
Preconditions.checkNotNull(distributionDesc);
DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
// Refresh scheme: ASYNC (optionally periodic), SYNC, or MANUAL.
MaterializedView.MvRefreshScheme mvRefreshScheme;
RefreshSchemeDesc refreshSchemeDesc = stmt.getRefreshSchemeDesc();
if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.ASYNC) {
mvRefreshScheme = new MaterializedView.MvRefreshScheme();
AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc;
MaterializedView.AsyncRefreshContext asyncRefreshContext = mvRefreshScheme.getAsyncRefreshContext();
asyncRefreshContext.setStartTime(Utils.getLongFromDateTime(asyncRefreshSchemeDesc.getStartTime()));
asyncRefreshContext.setDefineStartTime(asyncRefreshSchemeDesc.isDefineStartTime());
// An interval literal makes the refresh periodic; otherwise it is event-triggered.
if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) {
asyncRefreshContext.setStep(
((IntLiteral) asyncRefreshSchemeDesc.getIntervalLiteral().getValue()).getValue());
asyncRefreshContext.setTimeUnit(
asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription());
}
} else if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.SYNC) {
mvRefreshScheme = new MaterializedView.MvRefreshScheme();
mvRefreshScheme.setType(MaterializedView.RefreshType.SYNC);
} else {
mvRefreshScheme = new MaterializedView.MvRefreshScheme();
mvRefreshScheme.setType(MaterializedView.RefreshType.MANUAL);
}
// Materialize the MV object and its base index metadata.
long mvId = GlobalStateMgr.getCurrentState().getNextId();
MaterializedView materializedView =
new MaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo,
distributionInfo, mvRefreshScheme);
materializedView.setComment(stmt.getComment());
materializedView.setBaseTableIds(stmt.getBaseTableIds());
materializedView.setViewDefineSql(stmt.getInlineViewDef());
materializedView.setPartitionRefTableExprs(Lists.newArrayList(stmt.getPartitionRefTableExpr()));
long baseIndexId = getNextId();
materializedView.setBaseIndexId(baseIndexId);
int schemaVersion = 0;
int schemaHash = Util.schemaHash(schemaVersion, baseSchema, null, 0d);
short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, null);
TStorageType baseIndexStorageType = TStorageType.COLUMN;
materializedView.setIndexMeta(baseIndexId, mvName, baseSchema, schemaVersion, schemaHash,
shortKeyColumnCount, baseIndexStorageType, stmt.getKeysType());
// Property analysis consumes entries from `properties` as it recognizes them.
Map<String, String> properties = stmt.getProperties();
short replicationNum = FeConstants.default_replication_num;
try {
boolean isReplicationNumSet =
properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM);
replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum);
if (isReplicationNumSet) {
materializedView.setReplicationNum(replicationNum);
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage(), e);
}
// Session-variable hints from the defining SELECT are captured for the refresh task.
Map<String, String> optHints = null;
QueryRelation queryRelation = stmt.getQueryStatement().getQueryRelation();
if (queryRelation instanceof SelectRelation) {
SelectRelation selectRelation = (SelectRelation) queryRelation;
optHints = selectRelation.getSelectList().getOptHints();
if (optHints != null && !optHints.isEmpty()) {
// Validate the hints by applying them to a scratch session variable.
SessionVariable sessionVariable = VariableMgr.newSessionVariable();
for (String key : optHints.keySet()) {
VariableMgr.setVar(sessionVariable, new SetVar(key, new StringLiteral(optHints.get(key))), true);
}
}
}
DataProperty dataProperty;
try {
boolean hasMedium = false;
if (properties != null) {
hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
}
dataProperty = PropertyAnalyzer.analyzeDataProperty(properties,
DataProperty.DEFAULT_DATA_PROPERTY);
if (hasMedium && dataProperty.getStorageMedium() == TStorageMedium.SSD) {
materializedView.setStorageMedium(dataProperty.getStorageMedium());
materializedView.getTableProperty().getProperties()
.put(PropertyAnalyzer.PROPERTIES_STORAGE_COLDOWN_TIME,
String.valueOf(dataProperty.getCooldownTimeMs()));
}
// Anything left in `properties` was not recognized by any analyzer above.
if (properties != null && !properties.isEmpty()) {
throw new DdlException("Unknown properties: " + properties);
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage(), e);
}
boolean createMvSuccess;
Set<Long> tabletIdSet = new HashSet<>();
// Unpartitioned MVs get their single partition and tablets created up front;
// partitioned MVs create partitions on refresh.
if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
long partitionId = GlobalStateMgr.getCurrentState().getNextId();
Preconditions.checkNotNull(dataProperty);
partitionInfo.setDataProperty(partitionId, dataProperty);
partitionInfo.setReplicationNum(partitionId, replicationNum);
partitionInfo.setIsInMemory(partitionId, false);
partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK);
Long version = Partition.PARTITION_INIT_VERSION;
Partition partition = createPartition(db, materializedView, partitionId, mvName, version, tabletIdSet);
buildPartitions(db, materializedView, Collections.singletonList(partition));
materializedView.addPartition(partition);
}
// Register the MV under the global lock; on failure, clean up the tablets
// already registered in the inverted index.
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
if (getDb(db.getId()) == null) {
throw new DdlException("Database has been dropped when creating materialized view");
}
createMvSuccess = db.createMaterializedWithLock(materializedView, false);
if (!createMvSuccess) {
for (Long tabletId : tabletIdSet) {
GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
}
if (!stmt.isIfNotExists()) {
ErrorReport
.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, materializedView,
"Materialized view already exists");
} else {
LOG.info("Create materialized view[{}] which already exists", materializedView);
return;
}
}
} finally {
unlock();
}
LOG.info("Successfully create materialized view[{};{}]", mvName, mvId);
// Non-sync MVs are refreshed by a background task.
if (createMvSuccess) {
createTaskForMaterializedView(dbName, materializedView, optHints);
}
}
// Creates the background refresh task for a newly created materialized view.
// SYNC MVs need no task. MANUAL MVs get an on-demand task; ASYNC MVs get either
// an event-triggered task (no interval) or a periodical task with a schedule.
// Event-triggered tasks are executed once immediately after creation.
private void createTaskForMaterializedView(String dbName, MaterializedView materializedView,
Map<String, String> optHints) throws DdlException {
MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType();
if (refreshType != MaterializedView.RefreshType.SYNC) {
Task task = TaskBuilder.buildMvTask(materializedView, dbName);
MaterializedView.AsyncRefreshContext asyncRefreshContext =
materializedView.getRefreshScheme().getAsyncRefreshContext();
if (refreshType == MaterializedView.RefreshType.MANUAL) {
task.setType(Constants.TaskType.MANUAL);
} else if (refreshType == MaterializedView.RefreshType.ASYNC) {
// No time unit means "refresh when base tables change" rather than on a timer.
if (asyncRefreshContext.getTimeUnit() == null) {
task.setType(Constants.TaskType.EVENT_TRIGGERED);
} else {
long startTime = asyncRefreshContext.getStartTime();
TaskSchedule taskSchedule = new TaskSchedule(startTime,
asyncRefreshContext.getStep(),
TimeUtils.convertUnitIdentifierToTimeUnit(asyncRefreshContext.getTimeUnit()));
task.setSchedule(taskSchedule);
task.setType(Constants.TaskType.PERIODICAL);
}
}
// Session-variable hints captured from the MV definition ride along on the task.
if (optHints != null) {
Map<String, String> taskProperties = task.getProperties();
taskProperties.putAll(optHints);
}
TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
taskManager.createTask(task, false);
// Kick off an initial refresh for event-triggered MVs.
if (task.getType() == Constants.TaskType.EVENT_TRIGGERED) {
taskManager.executeTask(task.getName());
}
}
}
// Drops a materialized view. Multi-table (new-style) MVs are dropped here directly:
// the table is removed, back-references on all base tables are cleared, and the
// associated refresh task is deleted. Old-style (rollup) MVs fall through to the
// alter subsystem.
@Override
public void dropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException {
// NOTE(review): when getDbTblName() != null the statement is dispatched to the
// alter subsystem AND then still processed below — confirm this double path is
// intentional and not a redundant/conflicting drop.
if (stmt.getDbTblName() != null) {
stateMgr.getAlterInstance().processDropMaterializedView(stmt);
}
Database db = getDb(stmt.getDbName());
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName());
}
Table table;
db.readLock();
try {
table = db.getTable(stmt.getMvName());
} finally {
db.readUnlock();
}
if (table instanceof MaterializedView) {
// Force-drop the MV table itself.
db.dropTable(table.getName(), stmt.isSetIfExists(), true);
// Remove this MV from every base table's related-MV set.
Set<Long> baseTableIds = ((MaterializedView) table).getBaseTableIds();
if (baseTableIds != null) {
for (Long baseTableId : baseTableIds) {
OlapTable baseTable = ((OlapTable) db.getTable(baseTableId));
if (baseTable != null) {
baseTable.removeRelatedMaterializedView(table.getId());
}
}
}
// Drop the MV's background refresh task, if one exists.
TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(table.getId()));
if (refreshTask != null) {
taskManager.dropTasks(Lists.newArrayList(refreshTask.getId()), false);
}
} else {
// Not a new-style MV: let the alter subsystem handle (or reject) the drop.
stateMgr.getAlterInstance().processDropMaterializedView(stmt);
}
}
// Handles ALTER MATERIALIZED VIEW by delegating to the alter subsystem.
@Override
public void alterMaterializedView(AlterMaterializedViewStatement stmt) throws DdlException, MetaNotFoundException {
stateMgr.getAlterInstance().processAlterMaterializedView(stmt);
}
// Triggers a refresh of the named materialized view by executing its task with the
// given priority. The task is created lazily if it does not exist yet (e.g. after
// a restart). Throws MetaNotFoundException when the name is not an MV.
@Override
public void refreshMaterializedView(String dbName, String mvName, int priority) throws DdlException, MetaNotFoundException {
Database db = this.getDb(dbName);
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
}
MaterializedView materializedView = null;
db.readLock();
try {
final Table table = db.getTable(mvName);
if (table instanceof MaterializedView) {
materializedView = (MaterializedView) table;
}
} finally {
db.readUnlock();
}
if (materializedView == null) {
throw new MetaNotFoundException(mvName + " is not a materialized view");
}
TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
final String mvTaskName = TaskBuilder.getMvTaskName(materializedView.getId());
// Lazily (re)create the refresh task before executing it.
if (!taskManager.containTask(mvTaskName)) {
Task task = TaskBuilder.buildMvTask(materializedView, dbName);
taskManager.createTask(task, false);
}
taskManager.executeTask(mvTaskName, new ExecuteOption(priority));
}
// Cancels an in-flight refresh of the named materialized view by killing its task.
// A missing task is a no-op. Throws MetaNotFoundException when the name is not an MV.
@Override
public void cancelRefreshMaterializedView(String dbName, String mvName) throws DdlException, MetaNotFoundException {
    Database db = this.getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    MaterializedView mv = null;
    db.readLock();
    try {
        Table candidate = db.getTable(mvName);
        if (candidate instanceof MaterializedView) {
            mv = (MaterializedView) candidate;
        }
    } finally {
        db.readUnlock();
    }
    if (mv == null) {
        throw new MetaNotFoundException(mvName + " is not a materialized view");
    }
    TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
    Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(mv.getId()));
    if (refreshTask != null) {
        taskManager.killTask(refreshTask.getName(), false);
    }
}
/*
 * Handles CancelAlterTableStmt (the CANCEL ALTER command from the client),
 * dispatching to SchemaChangeHandler or RollupHandler as appropriate.
 */
// Dispatches a CANCEL ALTER request to the handler that owns the alter type.
public void cancelAlter(CancelAlterTableStmt stmt) throws DdlException {
    switch (stmt.getAlterType()) {
        case ROLLUP:
            stateMgr.getRollupHandler().cancel(stmt);
            break;
        case COLUMN:
            stateMgr.getSchemaChangeHandler().cancel(stmt);
            break;
        case MATERIALIZED_VIEW:
            stateMgr.getRollupHandler().cancelMV(stmt);
            break;
        default:
            throw new DdlException("Cancel " + stmt.getAlterType() + " does not implement yet");
    }
}
// Renames an OLAP table: validate, swap the entry in the database's table map,
// deactivate dependent materialized views, and persist the rename to the edit log.
@Override
public void renameTable(Database db, Table table, TableRenameClause tableRenameClause) throws DdlException {
    OlapTable olapTable = (OlapTable) table;
    if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState());
    }
    String oldName = olapTable.getName();
    String newName = tableRenameClause.getNewTableName();
    if (oldName.equals(newName)) {
        throw new DdlException("Same table name");
    }
    if (db.getTable(newName) != null) {
        throw new DdlException("Table name[" + newName + "] is already used");
    }
    olapTable.checkAndSetName(newName, false);
    // Re-register the table under its new name.
    db.dropTable(oldName);
    db.createTable(olapTable);
    // Deactivate MVs that reference this table (see disableMaterializedView).
    disableMaterializedView(db, olapTable);
    TableInfo tableInfo = TableInfo.createForTableRename(db.getId(), olapTable.getId(), newName);
    editLog.logTableRename(tableInfo);
    LOG.info("rename table[{}] to {}, tableId: {}", oldName, newName, olapTable.getId());
}
// Marks every materialized view built on this table inactive; a dangling MV id
// (view already gone) is logged and skipped.
private void disableMaterializedView(Database db, OlapTable olapTable) {
    for (Long mvId : olapTable.getRelatedMaterializedViews()) {
        Table candidate = db.getTable(mvId);
        if (candidate == null) {
            LOG.warn("Ignore materialized view {} does not exists", mvId);
            continue;
        }
        ((MaterializedView) candidate).setActive(false);
    }
}
// Replay-side counterpart of renameTable(): applies an already-logged rename
// under the db write lock without writing a new edit-log entry.
public void replayRenameTable(TableInfo tableInfo) {
    String newTableName = tableInfo.getNewTableName();
    Database db = getDb(tableInfo.getDbId());
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableInfo.getTableId());
        String oldTableName = table.getName();
        db.dropTable(oldTableName);
        table.setName(newTableName);
        db.createTable(table);
        disableMaterializedView(db, table);
        LOG.info("replay rename table[{}] to {}, tableId: {}", oldTableName, newTableName, table.getId());
    } finally {
        db.writeUnlock();
    }
}
// Renames one partition of a range-partitioned OLAP table and logs the change.
@Override
public void renamePartition(Database db, Table table, PartitionRenameClause renameClause) throws DdlException {
    OlapTable olapTable = (OlapTable) table;
    if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState());
    }
    // Single-partition (non-range) tables have nothing meaningful to rename.
    if (olapTable.getPartitionInfo().getType() != PartitionType.RANGE) {
        throw new DdlException("Table[" + olapTable.getName() + "] is single partitioned. "
                + "no need to rename partition name.");
    }
    String oldName = renameClause.getPartitionName();
    String newName = renameClause.getNewPartitionName();
    if (oldName.equalsIgnoreCase(newName)) {
        throw new DdlException("Same partition name");
    }
    Partition partition = olapTable.getPartition(oldName);
    if (partition == null) {
        throw new DdlException("Partition[" + oldName + "] does not exists");
    }
    if (olapTable.checkPartitionNameExist(newName)) {
        throw new DdlException("Partition name[" + newName + "] is already used");
    }
    olapTable.renamePartition(oldName, newName);
    disableMaterializedView(db, olapTable);
    editLog.logPartitionRename(
            TableInfo.createForPartitionRename(db.getId(), olapTable.getId(), partition.getId(), newName));
    LOG.info("rename partition[{}] to {}", oldName, newName);
}
// Replay-side counterpart of renamePartition(): applies an already-logged
// partition rename under the db write lock without re-logging.
public void replayRenamePartition(TableInfo tableInfo) {
    long dbId = tableInfo.getDbId();
    long tableId = tableInfo.getTableId();
    long partitionId = tableInfo.getPartitionId();
    String newPartitionName = tableInfo.getNewPartitionName();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableId);
        Partition partition = table.getPartition(partitionId);
        // Fix: capture the old name BEFORE renaming. The previous code re-read
        // partition.getName() after the rename for the log line, so the "from"
        // name it printed was already the new name.
        String oldPartitionName = partition.getName();
        table.renamePartition(oldPartitionName, newPartitionName);
        disableMaterializedView(db, table);
        LOG.info("replay rename partition[{}] to {}", oldPartitionName, newPartitionName);
    } finally {
        db.writeUnlock();
    }
}
// Renames a rollup (materialized index) by re-keying the table's
// index-name-to-id map; the index itself is left untouched.
public void renameRollup(Database db, OlapTable table, RollupRenameClause renameClause) throws DdlException {
    if (table.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + table.getName() + "] is under " + table.getState());
    }
    String rollupName = renameClause.getRollupName();
    if (rollupName.equals(table.getName())) {
        throw new DdlException("Using ALTER TABLE RENAME to change table name");
    }
    String newRollupName = renameClause.getNewRollupName();
    if (rollupName.equals(newRollupName)) {
        throw new DdlException("Same rollup name");
    }
    Map<String, Long> indexNameToId = table.getIndexNameToId();
    Long indexId = indexNameToId.get(rollupName);
    if (indexId == null) {
        throw new DdlException("Rollup index[" + rollupName + "] does not exists");
    }
    if (indexNameToId.get(newRollupName) != null) {
        throw new DdlException("Rollup name[" + newRollupName + "] is already used");
    }
    indexNameToId.remove(rollupName);
    indexNameToId.put(newRollupName, indexId);
    editLog.logRollupRename(TableInfo.createForRollupRename(db.getId(), table.getId(), indexId, newRollupName));
    LOG.info("rename rollup[{}] to {}", rollupName, newRollupName);
}
// Replay-side counterpart of renameRollup(): re-keys the index-name-to-id map
// under the db write lock without re-logging.
public void replayRenameRollup(TableInfo tableInfo) {
    long indexId = tableInfo.getIndexId();
    String newRollupName = tableInfo.getNewRollupName();
    Database db = getDb(tableInfo.getDbId());
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableInfo.getTableId());
        String oldRollupName = table.getIndexNameById(indexId);
        Map<String, Long> indexNameToId = table.getIndexNameToId();
        indexNameToId.remove(oldRollupName);
        indexNameToId.put(newRollupName, indexId);
        LOG.info("replay rename rollup[{}] to {}", oldRollupName, newRollupName);
    } finally {
        db.writeUnlock();
    }
}
// Column rename is not supported; fail fast with a clear message.
// Fix: corrected the misspelled error message ("implmented" -> "implemented").
public void renameColumn(Database db, OlapTable table, ColumnRenameClause renameClause) throws DdlException {
    throw new DdlException("not implemented");
}
// Column-rename replay is not supported; fail fast with a clear message.
// Fix: corrected the misspelled error message ("implmented" -> "implemented").
public void replayRenameColumn(TableInfo tableInfo) throws DdlException {
    throw new DdlException("not implemented");
}
// Applies dynamic-partition properties to a table, registers/unregisters it
// with the dynamic-partition scheduler, and logs the original property map.
public void modifyTableDynamicPartition(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    // Snapshot the caller-supplied properties for the edit log before analysis
    // below may transform them.
    Map<String, String> logProperties = new HashMap<>(properties);
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(table, properties);
    } else {
        tableProperty.modifyTableProperties(DynamicPartitionUtil.analyzeDynamicPartition(properties));
        tableProperty.buildDynamicProperty();
    }
    DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), table);
    stateMgr.getDynamicPartitionScheduler().createOrUpdateRuntimeInfo(
            table.getName(), DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime());
    editLog.logDynamicPartition(new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties));
}
/**
 * Sets the replication number of an unpartitioned table.
 * Range-partitioned tables must use MODIFY PARTITION (or the
 * "default.replication_num" property) instead.
 *
 * @param db         database holding the table; its write lock must be held
 * @param table      the unpartitioned OLAP table to modify
 * @param properties must contain PropertyAnalyzer.PROPERTIES_REPLICATION_NUM
 * @throws DdlException on colocate tables, range-partitioned tables, or a missing partition
 */
public void modifyTableReplicationNum(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    if (colocateTableIndex.isColocateTable(table.getId())) {
        throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum");
    }
    String defaultReplicationNumName = "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM;
    PartitionInfo partitionInfo = table.getPartitionInfo();
    if (partitionInfo.getType() == PartitionType.RANGE) {
        throw new DdlException(
                "This is a range partitioned table, you should specify partitions with MODIFY PARTITION clause." +
                        " If you want to set default replication number, please use '" + defaultReplicationNumName +
                        "' instead of '" + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM + "' to escape misleading.");
    }
    // For an unpartitioned table the single partition shares the table's name.
    String partitionName = table.getName();
    Partition partition = table.getPartition(partitionName);
    if (partition == null) {
        throw new DdlException("Partition does not exist. name: " + partitionName);
    }
    short replicationNum = Short.parseShort(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM));
    boolean isInMemory = partitionInfo.getIsInMemory(partition.getId());
    DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
    partitionInfo.setReplicationNum(partition.getId(), replicationNum);
    table.setReplicationNum(replicationNum);
    editLog.logModifyPartition(new ModifyPartitionInfo(db.getId(), table.getId(), partition.getId(),
            dataProperty, replicationNum, isInMemory));
    LOG.info("modify partition[{}-{}-{}] replication num to {}", db.getOriginName(), table.getName(),
            partition.getName(), replicationNum);
}
/**
 * Sets the table-level default replication number (visible via SHOW CREATE TABLE).
 * For unpartitioned tables the single partition's replication number is updated too.
 *
 * @param db         database holding the table; its write lock must be held
 * @param table      the OLAP table to modify
 * @param properties carries PropertyAnalyzer.PROPERTIES_REPLICATION_NUM
 * @throws DdlException on colocate tables or a missing partition
 */
public void modifyTableDefaultReplicationNum(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    if (colocateTableIndex.isColocateTable(table.getId())) {
        throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum");
    }
    PartitionInfo partitionInfo = table.getPartitionInfo();
    // For an unpartitioned table, resolve its single partition (named after the
    // table) so the new replication number can be pushed down to it.
    Partition singlePartition = null;
    if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
        String partitionName = table.getName();
        singlePartition = table.getPartition(partitionName);
        if (singlePartition == null) {
            throw new DdlException("Partition does not exist. name: " + partitionName);
        }
    }
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildReplicationNum();
    if (singlePartition != null) {
        partitionInfo.setReplicationNum(singlePartition.getId(), tableProperty.getReplicationNum());
    }
    editLog.logModifyReplicationNum(new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties));
    LOG.info("modify table[{}] replication num to {}", table.getName(),
            properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM));
}
// Updates the persistent-index property on a table and logs the change.
// Caller must already hold the db write lock.
public void modifyTableEnablePersistentIndexMeta(Database db, OlapTable table, Map<String, String> properties) {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        // First property ever set on this table; attach a fresh TableProperty.
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildEnablePersistentIndex();
    editLog.logModifyEnablePersistentIndex(
            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties));
}
// Updates the in-memory property on a table, fans the flag out to every
// partition, and logs the change. Caller must already hold the db write lock.
public void modifyTableInMemoryMeta(Database db, OlapTable table, Map<String, String> properties) {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildInMemory();
    // Propagate the table-level flag down to each partition.
    boolean inMemory = tableProperty.isInMemory();
    for (Partition partition : table.getPartitions()) {
        table.getPartitionInfo().setIsInMemory(partition.getId(), inMemory);
    }
    editLog.logModifyInMemory(new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties));
}
// Routes a tablet-meta modification to the handler for the given meta type.
public void modifyTableMeta(Database db, OlapTable table, Map<String, String> properties,
                            TTabletMetaType metaType) {
    switch (metaType) {
        case INMEMORY:
            modifyTableInMemoryMeta(db, table, properties);
            break;
        case ENABLE_PERSISTENT_INDEX:
            modifyTableEnablePersistentIndexMeta(db, table, properties);
            break;
        default:
            // Other meta types are ignored, matching the original behavior.
            break;
    }
}
/**
 * Enables or disables the low-cardinality global-dictionary optimization for a
 * table and persists the change to the edit log.
 *
 * @param dbName    database name
 * @param tableName table name
 * @param isForbit  true to forbid (disable) the global dict, false to re-enable it
 * @throws DdlException if the database or table does not exist
 */
public void setHasForbitGlobalDict(String dbName, String tableName, boolean isForbit) throws DdlException {
    Map<String, String> property = new HashMap<>();
    Database db = getDb(dbName);
    if (db == null) {
        throw new DdlException("the DB " + dbName + " is not exist");
    }
    // NOTE(review): the table flag is mutated and an edit-log entry is written
    // while holding only the db READ lock; other property mutators in this class
    // require the write lock — confirm whether this is intentional.
    db.readLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            throw new DdlException("the DB " + dbName + " table: " + tableName + "isn't exist");
        }
        // Only OLAP tables carry the global-dict flag; other table types are a no-op.
        if (table instanceof OlapTable) {
            OlapTable olapTable = (OlapTable) table;
            olapTable.setHasForbitGlobalDict(isForbit);
            if (isForbit) {
                property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.DISABLE_LOW_CARD_DICT);
                IDictManager.getInstance().disableGlobalDict(olapTable.getId());
            } else {
                property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.ABLE_LOW_CARD_DICT);
                IDictManager.getInstance().enableGlobalDict(olapTable.getId());
            }
            ModifyTablePropertyOperationLog info =
                    new ModifyTablePropertyOperationLog(db.getId(), table.getId(), property);
            editLog.logSetHasForbitGlobalDict(info);
        }
    } finally {
        db.readUnlock();
    }
}
// Replays a Hive external-table column change by overwriting the table's schema.
public void replayModifyHiveTableColumn(short opCode, ModifyTableColumnOperationLog info) {
    // Entries without a db name carry nothing to apply.
    if (info.getDbName() == null) {
        return;
    }
    String hiveDbName = info.getDbName();
    String hiveTableName = info.getTableName();
    LOG.info("replayModifyTableColumn hiveDb:{},hiveTable:{}", hiveDbName, hiveTableName);
    Database db = getDb(hiveDbName);
    db.writeLock();
    try {
        HiveTable hiveTable = (HiveTable) db.getTable(hiveTableName);
        hiveTable.setNewFullSchema(info.getColumns());
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays a table-property change from the edit log.
 * OP_SET_FORBIT_GLOBAL_DICT toggles the low-cardinality dict flag; every other
 * opcode is folded into the table's TableProperty and, where needed,
 * propagated to partitions.
 */
public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) {
    long dbId = info.getDbId();
    long tableId = info.getTableId();
    Map<String, String> properties = info.getProperties();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(tableId);
        if (opCode == OperationType.OP_SET_FORBIT_GLOBAL_DICT) {
            String enAble = properties.get(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE);
            Preconditions.checkState(enAble != null);
            // This branch tolerates a dropped table (olapTable may be null here).
            if (olapTable != null) {
                if (enAble.equals(PropertyAnalyzer.DISABLE_LOW_CARD_DICT)) {
                    olapTable.setHasForbitGlobalDict(true);
                    IDictManager.getInstance().disableGlobalDict(olapTable.getId());
                } else {
                    olapTable.setHasForbitGlobalDict(false);
                    IDictManager.getInstance().enableGlobalDict(olapTable.getId());
                }
            }
        } else {
            // NOTE(review): unlike the branch above, this path assumes the table
            // still exists; a dropped table would NPE here — confirm invariants.
            TableProperty tableProperty = olapTable.getTableProperty();
            if (tableProperty == null) {
                tableProperty = new TableProperty(properties);
                olapTable.setTableProperty(tableProperty.buildProperty(opCode));
            } else {
                tableProperty.modifyTableProperties(properties);
                tableProperty.buildProperty(opCode);
            }
            // Some opcodes need partition-level fan-out beyond the table flag.
            if (opCode == OperationType.OP_MODIFY_IN_MEMORY) {
                for (Partition partition : olapTable.getPartitions()) {
                    olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory());
                }
            } else if (opCode == OperationType.OP_MODIFY_REPLICATION_NUM) {
                // Only unpartitioned tables push replication num to their single partition.
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
                    String partitionName = olapTable.getName();
                    Partition partition = olapTable.getPartition(partitionName);
                    if (partition != null) {
                        partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum());
                    }
                }
            } else if (opCode == OperationType.OP_MODIFY_ENABLE_PERSISTENT_INDEX) {
                olapTable.setEnablePersistentIndex(tableProperty.enablePersistentIndex());
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Creates a logical view. The existence pre-check under the db read lock is
 * advisory only; the authoritative duplicate check happens inside
 * createTableWithLock below.
 *
 * @throws DdlException if the database is missing/dropped, the view name is
 *                      taken (without IF NOT EXISTS), or view init fails
 */
@Override
public void createView(CreateViewStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTable();
    Database db = this.getDb(stmt.getDbName());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    db.readLock();
    try {
        if (db.getTable(tableName) != null) {
            if (stmt.isSetIfNotExists()) {
                LOG.info("create view[{}] which already exists", tableName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
            }
        }
    } finally {
        db.readUnlock();
    }
    List<Column> columns = stmt.getColumns();
    long tableId = getNextId();
    View newView = new View(tableId, tableName, columns);
    newView.setComment(stmt.getComment());
    // Persist the view definition together with the session's SQL mode so the
    // definition can be re-parsed identically later.
    newView.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(),
            ConnectContext.get().getSessionVariable().getSqlMode());
    try {
        newView.init();
    } catch (UserException e) {
        throw new DdlException("failed to init view stmt", e);
    }
    // The global lock guards against the database being dropped concurrently;
    // re-check the db still exists after acquiring it.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (getDb(db.getId()) == null) {
            throw new DdlException("database has been dropped when creating view");
        }
        if (!db.createTableWithLock(newView, false)) {
            if (!stmt.isSetIfNotExists()) {
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            } else {
                LOG.info("create table[{}] which already exists", tableName);
                return;
            }
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create view[" + tableName + "-" + newView.getId() + "]");
}
// Replays a cluster-creation edit-log entry under the global lock.
// NOTE(review): tryLock(true) presumably blocks/retries until acquired — its
// return value is ignored here; confirm that semantics.
public void replayCreateCluster(Cluster cluster) {
    tryLock(true);
    try {
        unprotectCreateCluster(cluster);
    } finally {
        unlock();
    }
}
// Installs the default cluster without taking any locks (callers must hold the
// global lock): records it, creates the information_schema database, and marks
// the default cluster as created.
private void unprotectCreateCluster(Cluster cluster) {
    // Only the default, still-empty cluster may be installed.
    Preconditions.checkState(cluster.isDefaultCluster(), "Cluster must be default cluster");
    Preconditions.checkState(cluster.isEmpty(), "Cluster backendIdList must be 0");
    defaultCluster = cluster;
    final InfoSchemaDb infoDb = new InfoSchemaDb();
    unprotectCreateDb(infoDb);
    stateMgr.setIsDefaultClusterCreated(true);
}
// Returns the default cluster (the only cluster this deployment supports).
public Cluster getCluster() {
    return defaultCluster;
}
/**
 * Loads cluster metadata from an image stream and folds each value read into
 * the running XOR checksum. The persisted backend list is replaced with the
 * live one, and the information_schema database is (re)attached.
 *
 * @param dis      image input stream positioned at the cluster section
 * @param checksum running checksum to extend
 * @return the updated checksum
 */
public long loadCluster(DataInputStream dis, long checksum) throws IOException {
    if (GlobalStateMgr.getCurrentStateJournalVersion() >= FeMetaVersion.VERSION_30) {
        int clusterCount = dis.readInt();
        checksum ^= clusterCount;
        for (long i = 0; i < clusterCount; ++i) {
            final Cluster cluster = Cluster.read(dis);
            checksum ^= cluster.getId();
            Preconditions.checkState(cluster.isDefaultCluster(), "Cluster must be default_cluster");
            // The image's backend list may be stale; use the current one instead.
            List<Long> latestBackendIds = stateMgr.getClusterInfo().getBackendIds();
            cluster.setBackendIdList(latestBackendIds);
            // Reuse the already-registered information_schema db when present.
            String dbName = InfoSchemaDb.getFullInfoSchemaDbName();
            InfoSchemaDb db;
            if (getFullNameToDb().containsKey(dbName)) {
                db = (InfoSchemaDb) GlobalStateMgr.getCurrentState().getFullNameToDb().get(dbName);
            } else {
                db = new InfoSchemaDb();
            }
            String errMsg = "InfoSchemaDb id shouldn't larger than 10000, please restart your FE server";
            Preconditions.checkState(db.getId() < NEXT_ID_INIT_VALUE, errMsg);
            idToDb.put(db.getId(), db);
            fullNameToDb.put(db.getFullName(), db);
            cluster.addDb(dbName, db.getId());
            defaultCluster = cluster;
        }
    }
    LOG.info("finished replay cluster from image");
    return checksum;
}
// Bootstraps the default cluster from the currently registered backends,
// attaches every existing database to it, and logs the creation.
public void initDefaultCluster() {
    final List<Backend> clusterBackends = systemInfoService.getBackends();
    // Refuse to initialize when two backends share one host.
    Set<String> hostsSeen = Sets.newHashSet();
    for (Backend backend : clusterBackends) {
        if (!hostsSeen.add(backend.getHost())) {
            LOG.error("found more than one backends in same host: {}", backend.getHost());
            System.exit(-1);
        }
    }
    final List<Long> backendIds = Lists.newArrayList();
    for (Backend backend : clusterBackends) {
        backendIds.add(backend.getId());
    }
    final Cluster cluster = new Cluster(SystemInfoService.DEFAULT_CLUSTER, getNextId());
    cluster.setBackendIdList(backendIds);
    unprotectCreateCluster(cluster);
    for (Database database : idToDb.values()) {
        cluster.addDb(database.getFullName(), database.getId());
    }
    stateMgr.setIsDefaultClusterCreated(true);
    editLog.logCreateCluster(cluster);
}
// Persists the cluster section of the image: a count (always 1) followed by
// the default cluster payload, folding each value into the checksum.
public long saveCluster(DataOutputStream dos, long checksum) throws IOException {
    final int clusterCount = 1;
    checksum ^= clusterCount;
    dos.writeInt(clusterCount);
    final long clusterId = defaultCluster.getId();
    // Ids below NEXT_ID_INIT_VALUE belong to the built-in placeholder; skip it.
    if (clusterId >= NEXT_ID_INIT_VALUE) {
        checksum ^= clusterId;
        defaultCluster.write(dos);
    }
    return checksum;
}
// Replays a backend-release entry: detaches each backend from the default
// cluster and returns it to the free state.
// NOTE(review): getBackend(id) could return null if the backend was dropped;
// that would NPE below — confirm replay ordering guarantees it still exists.
public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) {
    for (long id : info.getBackendList()) {
        final Backend backend = stateMgr.getClusterInfo().getBackend(id);
        final Cluster cluster = defaultCluster;
        cluster.removeBackend(id);
        backend.setDecommissioned(false);
        backend.clearClusterName();
        backend.setBackendState(Backend.BackendState.free);
    }
}
/*
* Truncate specified table or partitions.
* The main idea is:
*
* 1. using the same schema to create new table(partitions)
* 2. use the new created table(partitions) to replace the old ones.
*
* if no partition specified, it will truncate all partitions of this table, including all temp partitions,
* otherwise, it will only truncate those specified partitions.
*
*/
/**
 * Truncates a table or a subset of its partitions.
 * Strategy: snapshot the table under the read lock, build brand-new empty
 * partitions with the same schema outside any lock, then atomically swap them
 * in under the write lock after re-validating that the table meta is unchanged.
 *
 * Fix: a related materialized view may be dropped concurrently between the
 * swap and the refresh kickoff; the old code dereferenced db.getTable(mvId)
 * unconditionally and would throw a NullPointerException in that case.
 */
@Override
public void truncateTable(TruncateTableStmt truncateTableStmt) throws DdlException {
    TableRef tblRef = truncateTableStmt.getTblRef();
    TableName dbTbl = tblRef.getName();
    Map<String, Long> origPartitions = Maps.newHashMap();
    OlapTable copiedTbl;
    Database db = getDb(dbTbl.getDb());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbTbl.getDb());
    }
    boolean truncateEntireTable = tblRef.getPartitionNames() == null;
    // Phase 1: snapshot the table and the partitions to truncate (read lock).
    db.readLock();
    try {
        Table table = db.getTable(dbTbl.getTbl());
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, dbTbl.getTbl());
        }
        if (!table.isOlapOrLakeTable()) {
            throw new DdlException("Only support truncate OLAP table or LAKE table");
        }
        OlapTable olapTable = (OlapTable) table;
        if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
            throw new DdlException("Table' state is not NORMAL: " + olapTable.getState());
        }
        if (!truncateEntireTable) {
            for (String partName : tblRef.getPartitionNames().getPartitionNames()) {
                Partition partition = olapTable.getPartition(partName);
                if (partition == null) {
                    throw new DdlException("Partition " + partName + " does not exist");
                }
                origPartitions.put(partName, partition.getId());
            }
        } else {
            for (Partition partition : olapTable.getPartitions()) {
                origPartitions.put(partition.getName(), partition.getId());
            }
        }
        copiedTbl = olapTable.selectiveCopy(origPartitions.keySet(), true, MaterializedIndex.IndexExtState.VISIBLE);
    } finally {
        db.readUnlock();
    }
    // Phase 2: build the replacement partitions without holding the db lock.
    List<Partition> newPartitions = Lists.newArrayListWithCapacity(origPartitions.size());
    Set<Long> tabletIdSet = Sets.newHashSet();
    try {
        for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
            long oldPartitionId = entry.getValue();
            long newPartitionId = getNextId();
            String newPartitionName = entry.getKey();
            // Carry every per-partition setting over to the new partition id.
            PartitionInfo partitionInfo = copiedTbl.getPartitionInfo();
            partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(oldPartitionId));
            partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(oldPartitionId));
            partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(oldPartitionId));
            partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(oldPartitionId));
            if (copiedTbl.isLakeTable()) {
                partitionInfo.setStorageInfo(newPartitionId, partitionInfo.getStorageInfo(oldPartitionId));
            }
            Partition newPartition =
                    createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet);
            newPartitions.add(newPartition);
        }
        buildPartitions(db, copiedTbl, newPartitions);
    } catch (DdlException e) {
        // Roll back any tablets/shards created before the failure.
        deleteUselessTabletAndShard(tabletIdSet, copiedTbl);
        throw e;
    }
    Preconditions.checkState(origPartitions.size() == newPartitions.size());
    // Phase 3: swap in the new partitions (write lock), after re-validating
    // that the table and its partitions are unchanged since the snapshot.
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(copiedTbl.getId());
        if (olapTable == null) {
            throw new DdlException("Table[" + copiedTbl.getName() + "] is dropped");
        }
        if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
            throw new DdlException("Table' state is not NORMAL: " + olapTable.getState());
        }
        for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
            Partition partition = copiedTbl.getPartition(entry.getValue());
            if (partition == null || !partition.getName().equalsIgnoreCase(entry.getKey())) {
                throw new DdlException("Partition [" + entry.getKey() + "] is changed");
            }
        }
        boolean metaChanged = false;
        if (olapTable.getIndexNameToId().size() != copiedTbl.getIndexNameToId().size()) {
            metaChanged = true;
        } else {
            Map<Long, Integer> copiedIndexIdToSchemaHash = copiedTbl.getIndexIdToSchemaHash();
            for (Map.Entry<Long, Integer> entry : olapTable.getIndexIdToSchemaHash().entrySet()) {
                long indexId = entry.getKey();
                if (!copiedIndexIdToSchemaHash.containsKey(indexId)) {
                    metaChanged = true;
                    break;
                }
                if (!copiedIndexIdToSchemaHash.get(indexId).equals(entry.getValue())) {
                    metaChanged = true;
                    break;
                }
            }
        }
        if (metaChanged) {
            throw new DdlException("Table[" + copiedTbl.getName() + "]'s meta has been changed. try again.");
        }
        truncateTableInternal(olapTable, newPartitions, truncateEntireTable, false);
        TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions,
                truncateEntireTable);
        editLog.logTruncateTable(info);
        // Kick off load-triggered refreshes for MVs built on this table.
        Set<Long> relatedMvs = olapTable.getRelatedMaterializedViews();
        for (long mvId : relatedMvs) {
            MaterializedView materializedView = (MaterializedView) db.getTable(mvId);
            if (materializedView == null) {
                // Fix: the MV may have been dropped concurrently; skip it
                // instead of throwing a NullPointerException.
                LOG.warn("Ignore materialized view {} does not exists", mvId);
                continue;
            }
            if (materializedView.isLoadTriggeredRefresh()) {
                refreshMaterializedView(db.getFullName(), materializedView.getName(),
                        Constants.TaskRunPriority.NORMAL.value());
            }
        }
    } catch (DdlException e) {
        deleteUselessTabletAndShard(tabletIdSet, copiedTbl);
        throw e;
    } catch (MetaNotFoundException e) {
        LOG.warn("Table related materialized view can not be found", e);
    } finally {
        db.writeUnlock();
    }
    LOG.info("finished to truncate table {}, partitions: {}",
            tblRef.getName().toSql(), tblRef.getPartitionNames());
}
// Cleanup after a failed partition build: drop the new tablets from the
// inverted index and, for lake tables, schedule their shards for removal.
private void deleteUselessTabletAndShard(Set<Long> tabletIdSet, OlapTable olapTable) {
    tabletIdSet.forEach(tabletId -> GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId));
    if (olapTable.isLakeTable() && !tabletIdSet.isEmpty()) {
        stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
        editLog.logAddUnusedShard(tabletIdSet);
    }
}
// Swaps each old partition for its freshly created empty replacement and
// cleans up the replaced partitions' tablets from the inverted index.
// Fix (idiom): the old code used index.getTablets().stream().forEach(...) purely
// to side-effect a set; a plain loop is the idiomatic, clearer form.
private void truncateTableInternal(OlapTable olapTable, List<Partition> newPartitions,
                                   boolean isEntireTable, boolean isReplay) {
    Set<Long> oldTabletIds = Sets.newHashSet();
    for (Partition newPartition : newPartitions) {
        Partition oldPartition = olapTable.replacePartition(newPartition);
        // Collect every tablet of the replaced partition for cleanup.
        for (MaterializedIndex index : oldPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
            for (Tablet tablet : index.getTablets()) {
                oldTabletIds.add(tablet.getId());
            }
        }
    }
    if (isEntireTable) {
        // Truncating the whole table also drops all temp partitions.
        olapTable.dropAllTempPartitions();
    }
    for (Long tabletId : oldTabletIds) {
        GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
    }
    // For lake tables, schedule the replaced shards for deletion and persist
    // that decision — skipped during replay, which must not write new log entries.
    if (olapTable.isLakeTable() && !isReplay) {
        stateMgr.getShardManager().getShardDeleter().addUnusedShardId(oldTabletIds);
        editLog.logAddUnusedShard(oldTabletIds);
    }
}
/**
 * Replays a TRUNCATE TABLE edit-log entry: swaps in the logged new partitions
 * and re-registers their tablets (and replicas, for local tables) in the
 * tablet inverted index.
 */
public void replayTruncateTable(TruncateTableInfo info) {
    Database db = getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTblId());
        truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable(), true);
        // NOTE(review): inverted-index registration is skipped on the checkpoint
        // thread — presumably handled elsewhere during checkpointing; confirm.
        if (!GlobalStateMgr.isCheckpointThread()) {
            TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
            for (Partition partition : info.getPartitions()) {
                long partitionId = partition.getId();
                TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
                        partitionId).getStorageMedium();
                for (MaterializedIndex mIndex : partition.getMaterializedIndices(
                        MaterializedIndex.IndexExtState.ALL)) {
                    long indexId = mIndex.getId();
                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                    TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(),
                            partitionId, indexId, schemaHash, medium, olapTable.isLakeTable());
                    for (Tablet tablet : mIndex.getTablets()) {
                        long tabletId = tablet.getId();
                        invertedIndex.addTablet(tabletId, tabletMeta);
                        // Replicas are only tracked for local (OLAP) tablets.
                        if (olapTable.isOlapTable()) {
                            for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                                invertedIndex.addReplica(tabletId, replica);
                            }
                        }
                    }
                }
            }
        }
    } finally {
        db.writeUnlock();
    }
}
// Replays a backend-tablets report: marks each persisted replica as bad,
// silently skipping anything (db/table/partition/index/tablet) that no longer exists.
public void replayBackendTabletsInfo(BackendTabletsInfo backendTabletsInfo) {
    // Legacy entries carried (tablet, schemaHash) pairs; those are obsolete and ignored.
    List<Pair<Long, Integer>> tabletsWithSchemaHash = backendTabletsInfo.getTabletSchemaHash();
    if (!tabletsWithSchemaHash.isEmpty()) {
        for (Pair<Long, Integer> tabletInfo : tabletsWithSchemaHash) {
            LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first);
        }
        return;
    }
    for (ReplicaPersistInfo persistInfo : backendTabletsInfo.getReplicaPersistInfos()) {
        Database db = getDb(persistInfo.getDbId());
        if (db == null) {
            continue;
        }
        db.writeLock();
        try {
            OlapTable olapTable = (OlapTable) db.getTable(persistInfo.getTableId());
            if (olapTable == null) {
                continue;
            }
            Partition partition = olapTable.getPartition(persistInfo.getPartitionId());
            if (partition == null) {
                continue;
            }
            MaterializedIndex index = partition.getIndex(persistInfo.getIndexId());
            if (index == null) {
                continue;
            }
            LocalTablet tablet = (LocalTablet) index.getTablet(persistInfo.getTabletId());
            if (tablet == null) {
                continue;
            }
            Replica replica = tablet.getReplicaById(persistInfo.getReplicaId());
            if (replica != null) {
                replica.setBad(true);
                LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying",
                        persistInfo.getReplicaId(), persistInfo.getTabletId(), persistInfo.getBackendId());
            }
        } finally {
            db.writeUnlock();
        }
    }
}
// Converts a random-distributed table to hash distribution and logs the change.
public void convertDistributionType(Database db, OlapTable tbl) throws DdlException {
    db.writeLock();
    try {
        if (!tbl.convertRandomDistributionToHashDistribution()) {
            throw new DdlException("Table " + tbl.getName() + " is not random distributed");
        }
        editLog.logModifyDistributionType(TableInfo.createForModifyDistribution(db.getId(), tbl.getId()));
        LOG.info("finished to modify distribution type of table: " + tbl.getName());
    } finally {
        db.writeUnlock();
    }
}
// Replay-side counterpart of convertDistributionType(); the conversion's
// boolean result is intentionally ignored during replay.
public void replayConvertDistributionType(TableInfo tableInfo) {
    Database db = getDb(tableInfo.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(tableInfo.getTableId());
        olapTable.convertRandomDistributionToHashDistribution();
        LOG.info("replay modify distribution type of table: " + olapTable.getName());
    } finally {
        db.writeUnlock();
    }
}
/*
* The entry of replacing partitions with temp partitions.
*/
/**
 * Executes ALTER TABLE ... REPLACE PARTITION: swaps the given formal partitions
 * with the given temp partitions under the db write lock, then logs the
 * operation for replay.
 *
 * @throws DdlException if the table is missing or not OLAP/LAKE, or any named
 *         (temp) partition does not exist
 */
public void replaceTempPartition(Database db, String tableName, ReplacePartitionClause clause) throws DdlException {
    List<String> partitionNames = clause.getPartitionNames();
    // De-duplicate temp partition names in case the clause lists one twice.
    List<String> tempPartitionNames =
            clause.getTempPartitionNames().stream().distinct().collect(Collectors.toList());
    boolean isStrictRange = clause.isStrictRange();
    boolean useTempPartitionName = clause.useTempPartitionName();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
        if (!table.isOlapOrLakeTable()) {
            throw new DdlException("Table[" + tableName + "] is not OLAP table or LAKE table");
        }
        OlapTable olapTable = (OlapTable) table;
        // Every source name must exist as a formal partition ...
        for (String partName : partitionNames) {
            if (!olapTable.checkPartitionNameExist(partName, false)) {
                throw new DdlException("Partition[" + partName + "] does not exist");
            }
        }
        // ... and every replacement as a temp partition.
        for (String partName : tempPartitionNames) {
            if (!olapTable.checkPartitionNameExist(partName, true)) {
                throw new DdlException("Temp partition[" + partName + "] does not exist");
            }
        }
        olapTable.replaceTempPartitions(partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName);
        // Log only after the in-memory swap succeeded so replay cannot diverge.
        ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), olapTable.getId(),
                partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName);
        editLog.logReplaceTempPartition(info);
        LOG.info("finished to replace partitions {} with temp partitions {} from table: {}",
                clause.getPartitionNames(), clause.getTempPartitionNames(), tableName);
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays a REPLACE PARTITION record. A missing db/table is tolerated because it
 * may have been dropped by a later record in the edit log.
 */
public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) {
    Database db = getDb(replaceTempPartitionLog.getDbId());
    if (db == null) {
        return;
    }
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(replaceTempPartitionLog.getTblId());
        if (olapTable == null) {
            return;
        }
        olapTable.replaceTempPartitions(replaceTempPartitionLog.getPartitions(),
                replaceTempPartitionLog.getTempPartitions(),
                replaceTempPartitionLog.isStrictRange(),
                replaceTempPartitionLog.useTempPartitionName());
    } catch (DdlException e) {
        // The original operation was validated before it was logged, so replay
        // is not expected to fail here.
        LOG.warn("should not happen.", e);
    } finally {
        db.writeUnlock();
    }
}
/** Handles ADMIN CHECK TABLET; only the CONSISTENCY check type triggers any work. */
public void checkTablets(AdminCheckTabletsStmt stmt) {
    if (stmt.getType() != AdminCheckTabletsStmt.CheckType.CONSISTENCY) {
        return;
    }
    stateMgr.getConsistencyChecker().addTabletsToCheck(stmt.getTabletIds());
}
/** Entry point of ADMIN SET REPLICA STATUS; delegates to the shared routine on the non-replay path. */
public void setReplicaStatus(AdminSetReplicaStatusStmt stmt) {
    setReplicaStatusInternal(stmt.getTabletId(), stmt.getBackendId(), stmt.getStatus(), false);
}
/** Replays a replica-status change from the edit log (replay path: no re-logging). */
public void replaySetReplicaStatus(SetReplicaStatusOperationLog log) {
    long tabletId = log.getTabletId();
    long backendId = log.getBackendId();
    Replica.ReplicaStatus status = log.getReplicaStatus();
    setReplicaStatusInternal(tabletId, backendId, status, true);
}
/**
 * Marks a replica BAD or OK on a specific backend. On the non-replay path the
 * change is written to the edit log, but only when setBadForce() reported a
 * change (presumably it returns whether the flag actually flipped — TODO confirm),
 * so redundant records are avoided.
 */
private void setReplicaStatusInternal(long tabletId, long backendId, Replica.ReplicaStatus status,
                                      boolean isReplay) {
    TabletMeta meta = stateMgr.getTabletInvertedIndex().getTabletMeta(tabletId);
    if (meta == null) {
        LOG.info("tablet {} does not exist", tabletId);
        return;
    }
    long dbId = meta.getDbId();
    Database db = getDb(dbId);
    if (db == null) {
        LOG.info("database {} of tablet {} does not exist", dbId, tabletId);
        return;
    }
    db.writeLock();
    try {
        Replica replica = stateMgr.getTabletInvertedIndex().getReplica(tabletId, backendId);
        if (replica == null) {
            LOG.info("replica of tablet {} does not exist", tabletId);
            return;
        }
        // Only BAD and OK are actionable; other status values are ignored.
        if (status == Replica.ReplicaStatus.BAD || status == Replica.ReplicaStatus.OK) {
            if (replica.setBadForce(status == Replica.ReplicaStatus.BAD)) {
                if (!isReplay) {
                    SetReplicaStatusOperationLog log =
                            new SetReplicaStatusOperationLog(backendId, tabletId, status);
                    editLog.logSetReplicaStatus(log);
                }
                LOG.info("set replica {} of tablet {} on backend {} as {}. is replay: {}",
                        replica.getId(), tabletId, backendId, status, isReplay);
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Hook invoked when a database is permanently erased: removes its per-database
 * transaction manager so the erased db id no longer has transaction state.
 */
public void onEraseDatabase(long dbId) {
    stateMgr.getGlobalTransactionMgr().removeDatabaseTransactionMgr(dbId);
}
/**
 * Hook invoked when an OLAP table is permanently erased: deletes every tablet of
 * every materialized index (all extended states) from the inverted index and
 * removes the table from its colocate group, if any.
 */
public void onEraseTable(@NotNull OlapTable olapTable) {
    TabletInvertedIndex tabletIndex = GlobalStateMgr.getCurrentInvertedIndex();
    for (Partition part : olapTable.getAllPartitions()) {
        for (MaterializedIndex mIndex : part.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
            for (Tablet t : mIndex.getTablets()) {
                tabletIndex.deleteTablet(t.getId());
            }
        }
    }
    colocateTableIndex.removeTable(olapTable.getId());
}
/**
 * Hook invoked when a partition is permanently erased: removes all of its tablets
 * from the inverted index and returns the ids of the lake tablets among them so
 * the caller can handle their remote data.
 */
public Set<Long> onErasePartition(Partition partition) {
    TabletInvertedIndex tabletIndex = GlobalStateMgr.getCurrentInvertedIndex();
    Set<Long> lakeTabletIds = new HashSet<>();
    for (MaterializedIndex mIndex : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
        for (Tablet t : mIndex.getTablets()) {
            long id = t.getId();
            TabletMeta tabletMeta = tabletIndex.getTabletMeta(id);
            if (tabletMeta != null && tabletMeta.isLakeTablet()) {
                lakeTabletIds.add(id);
            }
            tabletIndex.deleteTablet(id);
        }
    }
    return lakeTabletIds;
}
/**
 * Test-only helper that empties both database lookup maps.
 */
@VisibleForTesting
public void clear() {
    if (idToDb != null) {
        idToDb.clear();
    }
    if (fullNameToDb != null) {
        fullNameToDb.clear();
    }
    // NOTE(review): explicit GC is normally discouraged; presumably here to free
    // metadata promptly between tests — confirm before removing.
    System.gc();
}
/**
 * Takes a read-locked snapshot of {@code olapTable} restricted to
 * {@code sourcePartitionIds}, filling {@code origPartitions} with id -> name for
 * those partitions (output parameter).
 *
 * @throws RuntimeException if the table is not in NORMAL state
 */
@VisibleForTesting
public OlapTable getCopiedTable(Database db, OlapTable olapTable, List<Long> sourcePartitionIds,
                                Map<Long, String> origPartitions) {
    OlapTable copiedTbl;
    db.readLock();
    try {
        if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
            throw new RuntimeException("Table' state is not NORMAL: " + olapTable.getState()
                    + ", tableId:" + olapTable.getId() + ", tabletName:" + olapTable.getName());
        }
        for (Long id : sourcePartitionIds) {
            origPartitions.put(id, olapTable.getPartition(id).getName());
        }
        // Copy only the selected partitions with their VISIBLE indexes.
        copiedTbl = olapTable.selectiveCopy(origPartitions.values(), true, MaterializedIndex.IndexExtState.VISIBLE);
    } finally {
        db.readUnlock();
    }
    return copiedTbl;
}
/**
 * Builds one new partition per source partition, named source name + postfix,
 * copying the source's tablet type, in-memory flag, replication number and data
 * property onto the new partition id. Names that already exist as temp partitions
 * are skipped with a warning. Ids of all created tablets are added to tabletIdSet
 * so the caller can roll back on failure.
 */
@VisibleForTesting
public List<Partition> getNewPartitionsFromPartitions(Database db, OlapTable olapTable, List<Long> sourcePartitionIds,
                                                      Map<Long, String> origPartitions, OlapTable copiedTbl,
                                                      String namePostfix, Set<Long> tabletIdSet, List<Long> tmpPartitionIds)
        throws DdlException {
    List<Partition> newPartitions = Lists.newArrayListWithCapacity(sourcePartitionIds.size());
    for (int i = 0; i < sourcePartitionIds.size(); ++i) {
        long newPartitionId = tmpPartitionIds.get(i);
        long sourcePartitionId = sourcePartitionIds.get(i);
        String newPartitionName = origPartitions.get(sourcePartitionId) + namePostfix;
        // Checked against temp partitions (second arg true): target name already taken.
        if (olapTable.checkPartitionNameExist(newPartitionName, true)) {
            LOG.warn("partition:{} already exists in table:{}", newPartitionName, olapTable.getName());
            continue;
        }
        PartitionInfo partitionInfo = copiedTbl.getPartitionInfo();
        partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(sourcePartitionId));
        partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(sourcePartitionId));
        partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(sourcePartitionId));
        partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(sourcePartitionId));
        Partition newPartition =
                createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet);
        newPartitions.add(newPartition);
    }
    return newPartitions;
}
/**
 * Creates temp partitions mirroring {@code sourcePartitionIds} (same properties,
 * source name + postfix) and builds their replicas. On any failure, all tablets
 * created so far are removed from the inverted index before rethrowing.
 */
public List<Partition> createTempPartitionsFromPartitions(Database db, Table table,
                                                          String namePostfix, List<Long> sourcePartitionIds,
                                                          List<Long> tmpPartitionIds) {
    Preconditions.checkState(table instanceof OlapTable);
    OlapTable olapTable = (OlapTable) table;
    Map<Long, String> origPartitions = Maps.newHashMap();
    OlapTable copiedTbl = getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions);
    List<Partition> newPartitions = null;
    Set<Long> tabletIdSet = Sets.newHashSet();
    try {
        newPartitions = getNewPartitionsFromPartitions(db, olapTable, sourcePartitionIds, origPartitions,
                copiedTbl, namePostfix, tabletIdSet, tmpPartitionIds);
        buildPartitions(db, copiedTbl, newPartitions);
    } catch (Exception e) {
        // Roll back: drop the partially-created tablets from the inverted index.
        for (Long tabletId : tabletIdSet) {
            GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
        }
        LOG.warn("create partitions from partitions failed.", e);
        throw new RuntimeException("create partitions failed", e);
    }
    return newPartitions;
}
} | class LocalMetastore implements ConnectorMetadata {
private static final Logger LOG = LogManager.getLogger(LocalMetastore.class);
// Databases indexed by id and by full name; the two maps are kept in sync under
// the global state-manager lock (see tryLock/unlock).
private final ConcurrentHashMap<Long, Database> idToDb = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Database> fullNameToDb = new ConcurrentHashMap<>();
// The single default cluster; dbs are registered on it as they are created/dropped.
private Cluster defaultCluster;
private final GlobalStateMgr stateMgr;
// Edit log for persisting metadata operations; injected later via setEditLog().
private EditLog editLog;
private final CatalogRecycleBin recycleBin;
private ColocateTableIndex colocateTableIndex;
private final SystemInfoService systemInfoService;
/**
 * @param globalStateMgr     owning state manager (locking, id generation, txn mgr)
 * @param recycleBin         recycle bin used by non-force drop / recover operations
 * @param colocateTableIndex colocation group registry
 * @param systemInfoService  backend / cluster capacity information
 */
public LocalMetastore(GlobalStateMgr globalStateMgr, CatalogRecycleBin recycleBin,
                      ColocateTableIndex colocateTableIndex, SystemInfoService systemInfoService) {
    this.stateMgr = globalStateMgr;
    this.recycleBin = recycleBin;
    this.colocateTableIndex = colocateTableIndex;
    this.systemInfoService = systemInfoService;
}
// Delegates to the global state-manager lock; with mustLock=true the call is
// expected to block until the lock is acquired.
private boolean tryLock(boolean mustLock) {
    return stateMgr.tryLock(mustLock);
}
// Releases the global state-manager lock taken via tryLock().
private void unlock() {
    stateMgr.unlock();
}
// Next globally-unique id (used for dbs, tables, partitions, tablets, ...).
private long getNextId() {
    return stateMgr.getNextId();
}
// Late injection of the edit log (not available at construction time).
public void setEditLog(EditLog editLog) {
    this.editLog = editLog;
}
/**
 * Rebuilds the global tablet inverted index from the in-memory db/table metadata
 * (e.g. after loading an image). Skipped on the checkpoint thread, which must not
 * touch the live index.
 */
public void recreateTabletInvertIndex() {
    if (isCheckpointThread()) {
        return;
    }
    TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
    for (Database db : this.fullNameToDb.values()) {
        long dbId = db.getId();
        for (Table table : db.getTables()) {
            // Only native (OLAP/LAKE) tables have tablets.
            if (!table.isNativeTable()) {
                continue;
            }
            OlapTable olapTable = (OlapTable) table;
            long tableId = olapTable.getId();
            Collection<Partition> allPartitions = olapTable.getAllPartitions();
            for (Partition partition : allPartitions) {
                long partitionId = partition.getId();
                TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
                        partitionId).getStorageMedium();
                for (MaterializedIndex index : partition
                        .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                    long indexId = index.getId();
                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                    // One shared meta object per (db, table, partition, index).
                    TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, schemaHash, medium,
                            table.isLakeTable());
                    for (Tablet tablet : index.getTablets()) {
                        long tabletId = tablet.getId();
                        invertedIndex.addTablet(tabletId, tabletMeta);
                        // Lake tablets have no FE-side replicas; only local tablets do.
                        if (table.isLocalTable()) {
                            for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                                invertedIndex.addReplica(tabletId, replica);
                                // Old images (< VERSION_48) did not persist a per-replica schema hash.
                                if (MetaContext.get().getMetaVersion() < FeMetaVersion.VERSION_48) {
                                    replica.setSchemaHash(schemaHash);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
/**
 * Loads all databases from an image stream, registering each in both lookup maps
 * and in the transaction manager. The count and each db id are XOR-folded into
 * the checksum so the caller can validate image integrity.
 */
public long loadDb(DataInputStream dis, long checksum) throws IOException {
    int dbCount = dis.readInt();
    long newChecksum = checksum ^ dbCount;
    for (long i = 0; i < dbCount; ++i) {
        Database db = new Database();
        db.readFields(dis);
        newChecksum ^= db.getId();
        idToDb.put(db.getId(), db);
        fullNameToDb.put(db.getFullName(), db);
        stateMgr.getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId());
        // Re-fire creation hooks for materialized views after deserialization.
        db.getMaterializedViews().stream().forEach(Table::onCreate);
    }
    LOG.info("finished replay databases from image");
    return newChecksum;
}
/**
 * Writes all databases except the information-schema db to an image stream,
 * folding the written count and each persisted db id into the checksum.
 */
public long saveDb(DataOutputStream dos, long checksum) throws IOException {
    // NOTE(review): the -1 presumably accounts for exactly one InfoSchemaDb being
    // skipped in the loop below — confirm this invariant.
    int dbCount = idToDb.size() - 1;
    checksum ^= dbCount;
    dos.writeInt(dbCount);
    for (Map.Entry<Long, Database> entry : idToDb.entrySet()) {
        Database db = entry.getValue();
        String dbName = db.getFullName();
        if (!InfoSchemaDb.isInfoSchemaDb(dbName)) {
            checksum ^= entry.getKey();
            // Read lock keeps this db's serialized image self-consistent.
            db.readLock();
            try {
                db.write(dos);
            } finally {
                db.readUnlock();
            }
        }
    }
    return checksum;
}
/**
 * Creates a new database under the global lock and logs the creation.
 *
 * @throws AlreadyExistsException if a db with this name already exists
 * @throws DdlException if the global lock cannot be acquired
 */
@Override
public void createDb(String dbName) throws DdlException, AlreadyExistsException {
    long id = 0L;
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (fullNameToDb.containsKey(dbName)) {
            throw new AlreadyExistsException("Database Already Exists");
        } else {
            id = getNextId();
            Database db = new Database(id, dbName);
            unprotectCreateDb(db);
            // Log only after the in-memory registration succeeded.
            editLog.logCreateDb(db);
        }
    } finally {
        unlock();
    }
    LOG.info("createDb dbName = " + dbName + ", id = " + id);
}
/**
 * Registers {@code db} in both lookup maps, in the default cluster and in the
 * transaction manager. Caller must hold the global lock; nothing is logged here.
 */
public void unprotectCreateDb(Database db) {
    idToDb.put(db.getId(), db);
    fullNameToDb.put(db.getFullName(), db);
    final Cluster cluster = defaultCluster;
    cluster.addDb(db.getFullName(), db.getId());
    stateMgr.getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId());
}
// Direct (live) view of the id -> Database map, not a copy.
public ConcurrentHashMap<Long, Database> getIdToDb() {
    return idToDb;
}
/** Replays a create-database record; takes the global lock unconditionally. */
public void replayCreateDb(Database db) {
    tryLock(true);
    try {
        unprotectCreateDb(db);
        LOG.info("finish replay create db, name: {}, id: {}", db.getOriginName(), db.getId());
    } finally {
        unlock();
    }
}
/**
 * Drops a database. Without FORCE the db is moved to the recycle bin (and the
 * drop is refused while COMMITTED transactions are pending); with FORCE it is
 * erased immediately. Per-table cleanup runnables collected under the locks are
 * executed only after every lock has been released.
 */
@Override
public void dropDb(String dbName, boolean isForceDrop) throws DdlException, MetaNotFoundException {
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    List<Runnable> runnableList;
    try {
        if (!fullNameToDb.containsKey(dbName)) {
            throw new MetaNotFoundException("Database not found");
        }
        Database db = this.fullNameToDb.get(dbName);
        db.writeLock();
        try {
            if (!isForceDrop && stateMgr.getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) {
                throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " +
                        "The database [" + dbName +
                        "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," +
                        " please use \"DROP database FORCE\".");
            }
            Set<String> tableNames = db.getTableNamesWithLock();
            runnableList = unprotectDropDb(db, isForceDrop, false);
            if (!isForceDrop) {
                recycleBin.recycleDatabase(db, tableNames);
            } else {
                stateMgr.onEraseDatabase(db.getId());
            }
        } finally {
            db.writeUnlock();
        }
        // Unregister after the db-level work succeeded, then log.
        idToDb.remove(db.getId());
        fullNameToDb.remove(db.getFullName());
        final Cluster cluster = defaultCluster;
        cluster.removeDb(dbName, db.getId());
        DropDbInfo info = new DropDbInfo(db.getFullName(), isForceDrop);
        editLog.logDropDb(info);
        LOG.info("finish drop database[{}], id: {}, is force : {}", dbName, db.getId(), isForceDrop);
    } finally {
        unlock();
    }
    // Deferred cleanup runs outside of the global and db locks.
    for (Runnable runnable : runnableList) {
        runnable.run();
    }
}
/**
 * Drops every table of {@code db} without taking locks or writing the edit log;
 * callers must hold the db write lock and handle logging/recycling themselves.
 * (Parameter renamed from the misspelled {@code isForeDrop}.)
 *
 * @param db          database being dropped
 * @param isForceDrop true for DROP ... FORCE (passed through to each table drop)
 * @param isReplay    true when invoked from edit-log replay
 * @return deferred per-table cleanup actions to run after all locks are released
 */
@NotNull
public List<Runnable> unprotectDropDb(Database db, boolean isForceDrop, boolean isReplay) {
    List<Runnable> runnableList = new ArrayList<>();
    for (Table table : db.getTables()) {
        Runnable runnable = db.unprotectDropTable(table.getId(), isForceDrop, isReplay);
        if (runnable != null) {
            runnableList.add(runnable);
        }
    }
    return runnableList;
}
/** Replays a drop-database record; mirrors dropDb() without edit logging. */
public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException {
    List<Runnable> runnableList;
    tryLock(true);
    try {
        Database db = fullNameToDb.get(dbName);
        db.writeLock();
        try {
            Set<String> tableNames = db.getTableNamesWithLock();
            runnableList = unprotectDropDb(db, isForceDrop, true);
            if (!isForceDrop) {
                recycleBin.recycleDatabase(db, tableNames);
            } else {
                stateMgr.onEraseDatabase(db.getId());
            }
        } finally {
            db.writeUnlock();
        }
        fullNameToDb.remove(dbName);
        idToDb.remove(db.getId());
        final Cluster cluster = defaultCluster;
        cluster.removeDb(dbName, db.getId());
        LOG.info("finish replay drop db, name: {}, id: {}", dbName, db.getId());
    } finally {
        unlock();
    }
    // Deferred cleanup runs after all locks are released, as in dropDb().
    for (Runnable runnable : runnableList) {
        runnable.run();
    }
}
/**
 * Restores a dropped database from the recycle bin (RECOVER DATABASE) and logs
 * the recovery.
 *
 * @throws DdlException if a db with the same name already exists or the global
 *         lock cannot be acquired
 */
public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException {
    // Cheap pre-check outside the lock; re-checked under the lock below.
    if (getDb(recoverStmt.getDbName()) != null) {
        throw new DdlException("Database[" + recoverStmt.getDbName() + "] already exist.");
    }
    Database db = recycleBin.recoverDatabase(recoverStmt.getDbName());
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (fullNameToDb.containsKey(db.getFullName())) {
            throw new DdlException("Database[" + db.getOriginName() + "] already exist.");
        }
        fullNameToDb.put(db.getFullName(), db);
        idToDb.put(db.getId(), db);
        final Cluster cluster = defaultCluster;
        cluster.addDb(db.getFullName(), db.getId());
        // -1 for table/partition ids: presumably marks a database-level recovery.
        RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L);
        editLog.logRecoverDb(recoverInfo);
    } finally {
        unlock();
    }
    LOG.info("finish recover database, name: {}, id: {}", recoverStmt.getDbName(), db.getId());
}
/**
 * Restores a dropped table from the recycle bin (RECOVER TABLE).
 *
 * @throws DdlException if the db is missing, the table name is already taken, or
 *         the table is not present in the recycle bin
 */
public void recoverTable(RecoverTableStmt recoverStmt) throws DdlException {
    String dbName = recoverStmt.getDbName();
    // Plain lookup + null check instead of assignment inside the condition.
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    String tableName = recoverStmt.getTableName();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table != null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
        }
        if (!recycleBin.recoverTable(db, tableName)) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Restores a dropped partition of an OLAP/LAKE table from the recycle bin
 * (RECOVER PARTITION).
 *
 * @throws DdlException if the db/table is missing, the table is not OLAP/LAKE,
 *         or the partition name is already in use
 */
public void recoverPartition(RecoverPartitionStmt recoverStmt) throws DdlException {
    String dbName = recoverStmt.getDbName();
    // Plain lookup + null check instead of assignment inside the condition.
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    String tableName = recoverStmt.getTableName();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
        if (!table.isOlapOrLakeTable()) {
            throw new DdlException("table[" + tableName + "] is not OLAP table or LAKE table");
        }
        OlapTable olapTable = (OlapTable) table;
        String partitionName = recoverStmt.getPartitionName();
        if (olapTable.getPartition(partitionName) != null) {
            throw new DdlException("partition[" + partitionName + "] already exist in table[" + tableName + "]");
        }
        recycleBin.recoverPartition(db.getId(), olapTable, partitionName);
    } finally {
        db.writeUnlock();
    }
}
// Replays the final erasure of a database from the recycle bin.
public void replayEraseDatabase(long dbId) {
    recycleBin.replayEraseDatabase(dbId);
}
/** Replays a database recovery: pulls the db out of the recycle bin and re-registers it. */
public void replayRecoverDatabase(RecoverInfo info) {
    long dbId = info.getDbId();
    Database db = recycleBin.replayRecoverDatabase(dbId);
    replayCreateDb(db);
    LOG.info("replay recover db[{}], name: {}", dbId, db.getOriginName());
}
/**
 * Applies ALTER DATABASE SET DATA/REPLICA QUOTA and logs the change for replay.
 */
public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    AlterDatabaseQuotaStmt.QuotaType quotaType = stmt.getQuotaType();
    if (quotaType == AlterDatabaseQuotaStmt.QuotaType.DATA) {
        db.setDataQuotaWithLock(stmt.getQuota());
    } else if (quotaType == AlterDatabaseQuotaStmt.QuotaType.REPLICA) {
        db.setReplicaQuotaWithLock(stmt.getQuota());
    }
    long quota = stmt.getQuota();
    DatabaseInfo dbInfo = new DatabaseInfo(db.getFullName(), "", quota, quotaType);
    editLog.logAlterDb(dbInfo);
}
/** Replays an ALTER DATABASE quota record produced by alterDatabaseQuota(). */
public void replayAlterDatabaseQuota(String dbName, long quota, AlterDatabaseQuotaStmt.QuotaType quotaType) {
    Database db = getDb(dbName);
    Preconditions.checkNotNull(db);
    switch (quotaType) {
        case DATA:
            db.setDataQuotaWithLock(quota);
            break;
        case REPLICA:
            db.setReplicaQuotaWithLock(quota);
            break;
        default:
            // Other types (e.g. NONE, used by rename records) carry no quota change.
            break;
    }
}
/**
 * Renames a database under the global lock, updating the cluster registry and
 * the name lookup map, then logs the rename.
 *
 * @throws DdlException if old and new names are equal, the db is missing, or the
 *         new name is already in use
 */
public void renameDatabase(AlterDatabaseRename stmt) throws DdlException {
    String fullDbName = stmt.getDbName();
    String newFullDbName = stmt.getNewDbName();
    if (fullDbName.equals(newFullDbName)) {
        throw new DdlException("Same database name");
    }
    Database db;
    Cluster cluster;
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        cluster = defaultCluster;
        if (cluster == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, SystemInfoService.DEFAULT_CLUSTER);
        }
        db = fullNameToDb.get(fullDbName);
        if (db == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, fullDbName);
        }
        if (fullNameToDb.get(newFullDbName) != null) {
            throw new DdlException("Database name[" + newFullDbName + "] is already used");
        }
        // Re-register the db under the new name before mutating the db itself.
        cluster.removeDb(db.getFullName(), db.getId());
        cluster.addDb(newFullDbName, db.getId());
        db.setNameWithLock(newFullDbName);
        fullNameToDb.remove(fullDbName);
        fullNameToDb.put(newFullDbName, db);
        // Quota -1 and type NONE: the DatabaseInfo record is reused as a rename record.
        DatabaseInfo dbInfo =
                new DatabaseInfo(fullDbName, newFullDbName, -1L, AlterDatabaseQuotaStmt.QuotaType.NONE);
        editLog.logDatabaseRename(dbInfo);
    } finally {
        unlock();
    }
    LOG.info("rename database[{}] to [{}], id: {}", fullDbName, newFullDbName, db.getId());
}
/** Replays a database rename; mirrors renameDatabase() without edit logging. */
public void replayRenameDatabase(String dbName, String newDbName) {
    tryLock(true);
    try {
        Database db = fullNameToDb.get(dbName);
        Cluster cluster = defaultCluster;
        cluster.removeDb(db.getFullName(), db.getId());
        db.setName(newDbName);
        cluster.addDb(newDbName, db.getId());
        fullNameToDb.remove(dbName);
        fullNameToDb.put(newDbName, db);
        LOG.info("replay rename database {} to {}, id: {}", dbName, newDbName, db.getId());
    } finally {
        unlock();
    }
}
/**
* The following are the steps to create an OLAP table:
* 1. create columns
* 2. create partition info
* 3. create distribution info
* 4. set table id and base index id
* 5. set bloom filter columns
* 6. set and build TableProperty includes:
* 6.1. dynamicProperty
* 6.2. replicationNum
* 6.3. inMemory
* 6.4. storageFormat
* 7. set index meta
* 8. check colocation properties
* 9. create tablet in BE
* 10. add this table to FE's meta
* 11. add this table to ColocateGroup if necessary
*/
@Override
public void createTable(CreateTableStmt stmt) throws DdlException {
    String engineName = stmt.getEngineName();
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    Database db = getDb(stmt.getDbName());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    // Capacity/quota checks apply only to tables that store data in this cluster.
    if (!stmt.isExternal()) {
        systemInfoService.checkClusterCapacity();
        db.checkQuota();
    }
    // Fast-path duplicate check under the read lock; honors IF NOT EXISTS.
    db.readLock();
    try {
        if (db.getTable(tableName) != null) {
            if (stmt.isSetIfNotExists()) {
                LOG.info("create table[{}] which already exists", tableName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
            }
        }
    } finally {
        db.readUnlock();
    }
    // Dispatch on the storage engine; every branch returns (or throws).
    if (stmt.isOlapOrLakeEngine()) {
        createOlapOrLakeTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("mysql")) {
        createMysqlTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("elasticsearch") || engineName.equalsIgnoreCase("es")) {
        createEsTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("hive")) {
        createHiveTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("iceberg")) {
        createIcebergTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("hudi")) {
        createHudiTable(db, stmt);
        return;
    } else if (engineName.equalsIgnoreCase("jdbc")) {
        createJDBCTable(db, stmt);
        return;
    } else {
        ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_STORAGE_ENGINE, engineName);
    }
    // Unreachable: all branches above return or throw. Defensive guard only.
    Preconditions.checkState(false);
}
/**
 * Implements CREATE TABLE LIKE: renders the source table's CREATE statement,
 * re-parses and re-analyzes it, rewrites the target name, then runs a normal
 * createTable(). Views are rejected as a source.
 */
public void createTableLike(CreateTableLikeStmt stmt) throws DdlException {
    try {
        Database db = getDb(stmt.getExistedDbName());
        List<String> createTableStmt = Lists.newArrayList();
        db.readLock();
        try {
            Table table = db.getTable(stmt.getExistedTableName());
            if (table == null) {
                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, stmt.getExistedTableName());
            }
            // Renders the source table's DDL into createTableStmt (output parameter).
            GlobalStateMgr.getDdlStmt(stmt.getDbName(), table, createTableStmt, null, null, false, false);
            if (createTableStmt.isEmpty()) {
                ErrorReport.reportDdlException(ErrorCode.ERROR_CREATE_TABLE_LIKE_EMPTY, "CREATE");
            }
        } finally {
            db.readUnlock();
        }
        // Round-trip through the parser/analyzer so the copy is validated like a
        // user-issued CREATE TABLE.
        StatementBase statementBase = com.starrocks.sql.parser.SqlParser.parse(createTableStmt.get(0),
                ConnectContext.get().getSessionVariable().getSqlMode()).get(0);
        com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, ConnectContext.get());
        if (statementBase instanceof CreateTableStmt) {
            CreateTableStmt parsedCreateTableStmt = (CreateTableStmt) statementBase;
            parsedCreateTableStmt.setTableName(stmt.getTableName());
            if (stmt.isSetIfNotExists()) {
                parsedCreateTableStmt.setIfNotExists();
            }
            createTable(parsedCreateTableStmt);
        } else if (statementBase instanceof CreateViewStmt) {
            ErrorReport.reportDdlException(ErrorCode.ERROR_CREATE_TABLE_LIKE_UNSUPPORTED_VIEW);
        }
    } catch (UserException e) {
        throw new DdlException("Failed to execute CREATE TABLE LIKE " + stmt.getExistedTableName() + ". Reason: " +
                e.getMessage(), e);
    }
}
/**
 * Entry point for ALTER TABLE ... ADD PARTITION. Single range/list descs are
 * forwarded directly; a multi-range desc is first expanded into individual
 * single-range descs using the (single) partition column type and the merged
 * properties.
 */
@Override
public void addPartitions(Database db, String tableName, AddPartitionClause addPartitionClause)
        throws DdlException, AnalysisException {
    PartitionDesc partitionDesc = addPartitionClause.getPartitionDesc();
    if (partitionDesc instanceof SingleItemListPartitionDesc
            || partitionDesc instanceof MultiItemListPartitionDesc
            || partitionDesc instanceof SingleRangePartitionDesc) {
        addPartitions(db, tableName, ImmutableList.of(partitionDesc),
                addPartitionClause);
    } else if (partitionDesc instanceof MultiRangePartitionDesc) {
        // Snapshot what we need under the read lock; expansion happens outside it.
        db.readLock();
        RangePartitionInfo rangePartitionInfo;
        Map<String, String> tableProperties;
        try {
            Table table = db.getTable(tableName);
            CatalogUtils.checkTableExist(db, tableName);
            CatalogUtils.checkNativeTable(db, table);
            OlapTable olapTable = (OlapTable) table;
            tableProperties = olapTable.getTableProperty().getProperties();
            PartitionInfo partitionInfo = olapTable.getPartitionInfo();
            rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        } finally {
            db.readUnlock();
        }
        if (rangePartitionInfo == null) {
            throw new DdlException("Alter batch get partition info failed.");
        }
        List<Column> partitionColumns = rangePartitionInfo.getPartitionColumns();
        if (partitionColumns.size() != 1) {
            throw new DdlException("Alter batch build partition only support single range column.");
        }
        Column firstPartitionColumn = partitionColumns.get(0);
        MultiRangePartitionDesc multiRangePartitionDesc = (MultiRangePartitionDesc) partitionDesc;
        Map<String, String> properties = addPartitionClause.getProperties();
        if (properties == null) {
            properties = Maps.newHashMap();
        }
        // Inherit the dynamic-partition week start so generated ranges align with it.
        if (tableProperties != null && tableProperties.containsKey(DynamicPartitionProperty.START_DAY_OF_WEEK)) {
            properties.put(DynamicPartitionProperty.START_DAY_OF_WEEK,
                    tableProperties.get(DynamicPartitionProperty.START_DAY_OF_WEEK));
        }
        List<SingleRangePartitionDesc> singleRangePartitionDescs = multiRangePartitionDesc
                .convertToSingle(firstPartitionColumn.getType(), properties);
        // Upcast copy; replaces the previous identity stream-map which built the
        // same list element by element.
        List<PartitionDesc> partitionDescs = new ArrayList<>(singleRangePartitionDescs);
        addPartitions(db, tableName, partitionDescs, addPartitionClause);
    }
}
/**
 * Validates that {@code tableName} exists in {@code db}, is a native table, and
 * is in a state that allows the operation, then returns it as an OlapTable.
 */
private OlapTable checkTable(Database db, String tableName) throws DdlException {
    CatalogUtils.checkTableExist(db, tableName);
    Table found = db.getTable(tableName);
    CatalogUtils.checkNativeTable(db, found);
    OlapTable result = (OlapTable) found;
    CatalogUtils.checkTableState(result, tableName);
    return result;
}
/** Rejects ADD PARTITION on tables that are not range- or list-partitioned. */
private void checkPartitionType(PartitionInfo partitionInfo) throws DdlException {
    PartitionType type = partitionInfo.getType();
    boolean supported = type == PartitionType.RANGE || type == PartitionType.LIST;
    if (!supported) {
        throw new DdlException("Only support adding partition to range/list partitioned table");
    }
}
/**
 * Validates every partition desc against the table: analyzes range/list values
 * with merged properties and checks range conflicts / duplicate list values.
 * Descs whose names already exist are still analyzed but skipped for the
 * range-conflict check.
 */
private void analyzeAddPartition(OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                 AddPartitionClause addPartitionClause, PartitionInfo partitionInfo)
        throws DdlException, AnalysisException, NotImplementedException {
    Set<String> existPartitionNameSet =
            CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, partitionDescs);
    // Property precedence: table properties < clause properties < per-desc properties.
    Map<String, String> properties = olapTable.getProperties();
    Map<String, String> clauseProperties = addPartitionClause.getProperties();
    if (clauseProperties != null && !clauseProperties.isEmpty()) {
        properties.putAll(clauseProperties);
    }
    for (PartitionDesc partitionDesc : partitionDescs) {
        Map<String, String> cloneProperties = Maps.newHashMap(properties);
        Map<String, String> sourceProperties = partitionDesc.getProperties();
        if (sourceProperties != null && !sourceProperties.isEmpty()) {
            cloneProperties.putAll(sourceProperties);
        }
        if (partitionDesc instanceof SingleRangePartitionDesc) {
            RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
            SingleRangePartitionDesc singleRangePartitionDesc = ((SingleRangePartitionDesc) partitionDesc);
            singleRangePartitionDesc.analyze(rangePartitionInfo.getPartitionColumns().size(), cloneProperties);
            // Only check range validity for names that don't exist yet.
            if (!existPartitionNameSet.contains(singleRangePartitionDesc.getPartitionName())) {
                rangePartitionInfo.checkAndCreateRange(singleRangePartitionDesc,
                        addPartitionClause.isTempPartition());
            }
        } else if (partitionDesc instanceof SingleItemListPartitionDesc
                || partitionDesc instanceof MultiItemListPartitionDesc) {
            // List descs are analyzed against the partition columns' definitions.
            List<ColumnDef> columnDefList = partitionInfo.getPartitionColumns().stream()
                    .map(item -> new ColumnDef(item.getName(), new TypeDef(item.getType())))
                    .collect(Collectors.toList());
            partitionDesc.analyze(columnDefList, cloneProperties);
            CatalogUtils.checkPartitionValuesExistForAddListPartition(olapTable, partitionDesc);
        } else {
            throw new DdlException("Only support adding partition to range/list partitioned table");
        }
    }
}
/**
 * Resolves the distribution for new partitions: the clause's distribution if one
 * is given (it must match the table default's type and, for HASH, its columns,
 * with at least one bucket), otherwise the table's default distribution.
 */
private DistributionInfo getDistributionInfo(OlapTable olapTable, AddPartitionClause addPartitionClause)
        throws DdlException {
    DistributionInfo distributionInfo;
    List<Column> baseSchema = olapTable.getBaseSchema();
    DistributionInfo defaultDistributionInfo = olapTable.getDefaultDistributionInfo();
    DistributionDesc distributionDesc = addPartitionClause.getDistributionDesc();
    if (distributionDesc != null) {
        distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
        if (distributionInfo.getType() != defaultDistributionInfo.getType()) {
            throw new DdlException("Cannot assign different distribution type. default is: "
                    + defaultDistributionInfo.getType());
        }
        if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.HASH) {
            HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
            List<Column> newDistriCols = hashDistributionInfo.getDistributionColumns();
            List<Column> defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo)
                    .getDistributionColumns();
            if (!newDistriCols.equals(defaultDistriCols)) {
                throw new DdlException("Cannot assign hash distribution with different distribution cols. "
                        + "default is: " + defaultDistriCols);
            }
            if (hashDistributionInfo.getBucketNum() <= 0) {
                throw new DdlException("Cannot assign hash distribution buckets less than 1");
            }
        }
    } else {
        distributionInfo = defaultDistributionInfo;
    }
    return distributionInfo;
}
/**
 * If the table belongs to a colocate group, verifies the chosen distribution and
 * every desc's replication number against the group schema; otherwise a no-op.
 */
private void checkColocation(Database db, OlapTable olapTable, DistributionInfo distributionInfo,
                             List<PartitionDesc> partitionDescs)
        throws DdlException {
    if (!colocateTableIndex.isColocateTable(olapTable.getId())) {
        return;
    }
    String fullGroupName = db.getId() + "_" + olapTable.getColocateGroup();
    ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName);
    Preconditions.checkNotNull(groupSchema);
    groupSchema.checkDistribution(distributionInfo);
    for (PartitionDesc desc : partitionDescs) {
        groupSchema.checkReplicationNum(desc.getReplicationNum());
    }
}
/** Asserts that every desc resolved a non-null data property during analysis. */
private void checkDataProperty(List<PartitionDesc> partitionDescs) {
    partitionDescs.forEach(desc -> Preconditions.checkNotNull(desc.getPartitionDataProperty()));
}
/**
 * Creates one partition per desc on the snapshot table, allocating a fresh
 * partition id and copying the desc's properties into the partition info.
 * Created tablet ids are recorded per partition name (output map) and in the
 * aggregate set so the caller can roll back on failure.
 */
private List<Partition> createPartitionList(Database db, OlapTable copiedTable, List<PartitionDesc> partitionDescs,
                                            HashMap<String, Set<Long>> partitionNameToTabletSet,
                                            Set<Long> tabletIdSetForAll)
        throws DdlException {
    List<Partition> partitionList = Lists.newArrayListWithCapacity(partitionDescs.size());
    for (PartitionDesc partitionDesc : partitionDescs) {
        long partitionId = getNextId();
        DataProperty dataProperty = partitionDesc.getPartitionDataProperty();
        String partitionName = partitionDesc.getPartitionName();
        Long version = partitionDesc.getVersionInfo();
        Set<Long> tabletIdSet = Sets.newHashSet();
        copiedTable.getPartitionInfo().setDataProperty(partitionId, dataProperty);
        copiedTable.getPartitionInfo().setTabletType(partitionId, partitionDesc.getTabletType());
        copiedTable.getPartitionInfo()
                .setReplicationNum(partitionId, partitionDesc.getReplicationNum());
        copiedTable.getPartitionInfo().setIsInMemory(partitionId, partitionDesc.isInMemory());
        copiedTable.getPartitionInfo().setStorageInfo(partitionId, partitionDesc.getStorageInfo());
        Partition partition =
                createPartition(db, copiedTable, partitionId, partitionName, version, tabletIdSet);
        partitionList.add(partition);
        tabletIdSetForAll.addAll(tabletIdSet);
        partitionNameToTabletSet.put(partitionName, tabletIdSet);
    }
    return partitionList;
}
/**
 * Verifies that the table's index meta has not changed (index count, index ids,
 * schema hashes) since {@code copiedTable} was snapshotted; throws if it has,
 * so the caller can retry the whole ADD PARTITION.
 */
private void checkIfMetaChange(OlapTable olapTable, OlapTable copiedTable, String tableName) throws DdlException {
    if (olapTable.getIndexNameToId().size() != copiedTable.getIndexNameToId().size()) {
        throw new DdlException("Table[" + tableName + "]'s meta has been changed. try again.");
    }
    for (Map.Entry<Long, MaterializedIndexMeta> entry : olapTable.getIndexIdToMeta().entrySet()) {
        long indexId = entry.getKey();
        // Either the index disappeared from the snapshot or its schema hash differs.
        if (!copiedTable.getIndexIdToMeta().containsKey(indexId)
                || copiedTable.getIndexIdToMeta().get(indexId).getSchemaHash() !=
                        entry.getValue().getSchemaHash()) {
            throw new DdlException("Table[" + tableName + "]'s meta has been changed. try again.");
        }
    }
}
/**
 * Registers the freshly built partitions in the live table's PartitionInfo and
 * attaches them to the table (as temp partitions when requested). Partitions whose
 * names are in {@code existPartitionNameSet} already exist and are not re-added.
 */
private void updatePartitionInfo(PartitionInfo partitionInfo, List<Partition> partitionList,
                                 List<PartitionDesc> partitionDescs, Set<String> existPartitionNameSet,
                                 AddPartitionClause addPartitionClause, OlapTable olapTable)
        throws DdlException {
    boolean isTempPartition = addPartitionClause.isTempPartition();
    if (partitionInfo instanceof RangePartitionInfo) {
        ((RangePartitionInfo) partitionInfo).handleNewRangePartitionDescs(partitionDescs,
                partitionList, existPartitionNameSet, isTempPartition);
    } else if (partitionInfo instanceof ListPartitionInfo) {
        ((ListPartitionInfo) partitionInfo).handleNewListPartitionDescs(partitionDescs,
                partitionList, existPartitionNameSet, isTempPartition);
    } else {
        throw new DdlException("Only support adding partition to range/list partitioned table");
    }
    for (Partition partition : partitionList) {
        if (existPartitionNameSet.contains(partition.getName())) {
            continue;
        }
        if (isTempPartition) {
            olapTable.addTempPartition(partition);
        } else {
            olapTable.addPartition(partition);
        }
    }
}
/**
 * Writes edit-log entries for newly added RANGE partitions so followers/replay can
 * reconstruct them. Partitions whose names are in {@code existPartitionNameSet}
 * were already present and are skipped. Lake tables always use the V2 persist
 * format (it carries storage info); local tables use the legacy format.
 */
private void addRangePartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                  AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                                  List<Partition> partitionList, Set<String> existPartitionNameSet) {
    boolean isTempPartition = addPartitionClause.isTempPartition();
    int partitionLen = partitionList.size();
    List<PartitionPersistInfoV2> partitionInfoV2List = Lists.newArrayListWithCapacity(partitionLen);
    if (partitionLen == 1) {
        // Single-partition path: emit exactly one log record.
        Partition partition = partitionList.get(0);
        if (existPartitionNameSet.contains(partition.getName())) {
            LOG.info("add partition[{}] which already exists", partition.getName());
            return;
        }
        long partitionId = partition.getId();
        if (olapTable.isLakeTable()) {
            PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                    partitionDescs.get(0).getPartitionDataProperty(), partitionInfo.getReplicationNum(partition.getId()),
                    partitionInfo.getIsInMemory(partition.getId()), isTempPartition,
                    ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                    ((SingleRangePartitionDesc) partitionDescs.get(0)).getStorageInfo());
            partitionInfoV2List.add(info);
            AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List);
            editLog.logAddPartitions(infos);
        } else {
            PartitionPersistInfo info = new PartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                    ((RangePartitionInfo) partitionInfo).getRange(partitionId),
                    partitionDescs.get(0).getPartitionDataProperty(),
                    partitionInfo.getReplicationNum(partitionId),
                    partitionInfo.getIsInMemory(partitionId),
                    isTempPartition);
            editLog.logAddPartition(info);
        }
        LOG.info("succeed in creating partition[{}], name: {}, temp: {}", partitionId,
                partition.getName(), isTempPartition);
    } else {
        // Multi-partition path: batch all persist infos into one log record.
        // partitionDescs and partitionList are index-aligned (same order).
        List<PartitionPersistInfo> partitionInfoList = Lists.newArrayListWithCapacity(partitionLen);
        for (int i = 0; i < partitionLen; i++) {
            Partition partition = partitionList.get(i);
            if (!existPartitionNameSet.contains(partition.getName())) {
                if (olapTable.isLakeTable()) {
                    PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(),
                            partition, partitionDescs.get(i).getPartitionDataProperty(),
                            partitionInfo.getReplicationNum(partition.getId()),
                            partitionInfo.getIsInMemory(partition.getId()), isTempPartition,
                            ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                            ((SingleRangePartitionDesc) partitionDescs.get(i)).getStorageInfo());
                    partitionInfoV2List.add(info);
                } else {
                    PartitionPersistInfo info =
                            new PartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                                    ((RangePartitionInfo) partitionInfo).getRange(partition.getId()),
                                    partitionDescs.get(i).getPartitionDataProperty(),
                                    partitionInfo.getReplicationNum(partition.getId()),
                                    partitionInfo.getIsInMemory(partition.getId()),
                                    isTempPartition);
                    partitionInfoList.add(info);
                }
            }
        }
        if (olapTable.isLakeTable()) {
            AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List);
            editLog.logAddPartitions(infos);
        } else {
            AddPartitionsInfo infos = new AddPartitionsInfo(partitionInfoList);
            editLog.logAddPartitions(infos);
        }
        // NOTE(review): this logs every partition in partitionList, including ones
        // skipped above as already existing — presumably intentional; verify.
        for (Partition partition : partitionList) {
            LOG.info("succeed in creating partitions[{}], name: {}, temp: {}", partition.getId(),
                    partition.getName(), isTempPartition);
        }
    }
}
/**
 * Writes the edit-log entry for a newly added LIST partition. Only a single
 * partition per ADD PARTITION is supported for list-partitioned tables.
 */
private void addListPartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                                 AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                                 List<Partition> partitionList, Set<String> existPartitionNameSet)
        throws DdlException {
    if (partitionList == null || partitionList.size() != 1) {
        throw new DdlException("Only support add one partition when add list partition now");
    }
    Partition newPartition = partitionList.get(0);
    if (existPartitionNameSet.contains(newPartition.getName())) {
        LOG.info("add partition[{}] which already exists", newPartition.getName());
        return;
    }
    boolean isTempPartition = addPartitionClause.isTempPartition();
    long newPartitionId = newPartition.getId();
    ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo;
    PartitionPersistInfoV2 persistInfo = new ListPartitionPersistInfo(db.getId(), olapTable.getId(), newPartition,
            partitionDescs.get(0).getPartitionDataProperty(),
            partitionInfo.getReplicationNum(newPartitionId),
            partitionInfo.getIsInMemory(newPartitionId),
            isTempPartition,
            listPartitionInfo.getIdToValues().get(newPartitionId),
            listPartitionInfo.getIdToMultiValues().get(newPartitionId));
    editLog.logAddPartition(persistInfo);
    LOG.info("succeed in creating list partition[{}], name: {}, temp: {}", newPartitionId,
            newPartition.getName(), isTempPartition);
}
/**
 * Dispatches edit-log writing to the range- or list-specific logger depending
 * on the table's partition type.
 */
private void addPartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
                             AddPartitionClause addPartitionClause, PartitionInfo partitionInfo,
                             List<Partition> partitionList, Set<String> existPartitionNameSet)
        throws DdlException {
    switch (partitionInfo.getType()) {
        case RANGE:
            addRangePartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
            break;
        case LIST:
            addListPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
            break;
        default:
            throw new DdlException("Only support adding partition log to range/list partitioned table");
    }
}
/**
 * Removes from the tablet inverted index the tablets that were created for
 * partitions which turned out to already exist (they were never attached
 * to the table, so their tablets must not stay indexed).
 */
private void cleanExistPartitionNameSet(Set<String> existPartitionNameSet,
                                        HashMap<String, Set<Long>> partitionNameToTabletSet) {
    for (String name : existPartitionNameSet) {
        Set<Long> tablets = partitionNameToTabletSet.get(name);
        if (tablets != null) {
            for (Long tabletId : tablets) {
                GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
            }
        }
    }
}
/**
 * Failure cleanup: drops every created tablet from the inverted index, and for
 * lake tables also queues the corresponding shard ids for deletion.
 */
private void cleanTabletIdSetForAll(Set<Long> tabletIdSetForAll, boolean isLakeTable) {
    tabletIdSetForAll.forEach(tabletId -> GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId));
    if (isLakeTable) {
        stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSetForAll);
    }
}
/**
 * Top-level ADD PARTITION driver. Two-phase locking protocol:
 *   Phase 1 (read lock): validate the request and snapshot the table (selectiveCopy).
 *   Phase 2 (no lock):   build partitions/tablets and send create-replica tasks —
 *                        the slow part, deliberately done without holding the db lock.
 *   Phase 3 (write lock): re-validate that the table meta did not change meanwhile,
 *                        attach the partitions, and write the edit log.
 * On failure, all tablets created in phase 2 are cleaned from the inverted index.
 */
private void addPartitions(Database db, String tableName, List<PartitionDesc> partitionDescs,
                           AddPartitionClause addPartitionClause) throws DdlException {
    DistributionInfo distributionInfo;
    OlapTable olapTable;
    OlapTable copiedTable;
    db.readLock();
    try {
        olapTable = checkTable(db, tableName);
        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
        checkPartitionType(partitionInfo);
        analyzeAddPartition(olapTable, partitionDescs, addPartitionClause, partitionInfo);
        distributionInfo = getDistributionInfo(olapTable, addPartitionClause);
        checkColocation(db, olapTable, distributionInfo, partitionDescs);
        // Snapshot of the visible indexes; partitions are built against this copy
        // so the live table is untouched until the write-lock section below.
        copiedTable = olapTable.selectiveCopy(null, false, MaterializedIndex.IndexExtState.VISIBLE);
        copiedTable.setDefaultDistributionInfo(distributionInfo);
    } catch (AnalysisException | NotImplementedException e) {
        throw new DdlException(e.getMessage(), e);
    } finally {
        db.readUnlock();
    }
    Preconditions.checkNotNull(distributionInfo);
    Preconditions.checkNotNull(olapTable);
    Preconditions.checkNotNull(copiedTable);
    checkDataProperty(partitionDescs);
    Set<Long> tabletIdSetForAll = Sets.newHashSet();
    HashMap<String, Set<Long>> partitionNameToTabletSet = Maps.newHashMap();
    try {
        // Slow path outside any lock: create partition metadata and physical replicas.
        List<Partition> partitionList =
                createPartitionList(db, copiedTable, partitionDescs, partitionNameToTabletSet, tabletIdSetForAll);
        buildPartitions(db, copiedTable, partitionList);
        db.writeLock();
        Set<String> existPartitionNameSet = Sets.newHashSet();
        try {
            // Re-fetch the table: it may have been altered while we were unlocked.
            olapTable = checkTable(db, tableName);
            existPartitionNameSet = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable,
                    partitionDescs);
            if (existPartitionNameSet.size() > 0) {
                for (String partitionName : existPartitionNameSet) {
                    LOG.info("add partition[{}] which already exists", partitionName);
                }
            }
            // Abort (with retryable error) if index meta changed since the snapshot.
            checkIfMetaChange(olapTable, copiedTable, tableName);
            PartitionInfo partitionInfo = olapTable.getPartitionInfo();
            checkPartitionType(partitionInfo);
            updatePartitionInfo(partitionInfo, partitionList, partitionDescs, existPartitionNameSet,
                    addPartitionClause, olapTable);
            addPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList,
                    existPartitionNameSet);
        } finally {
            // Tablets built for already-existing partitions were never attached; unindex them.
            cleanExistPartitionNameSet(existPartitionNameSet, partitionNameToTabletSet);
            db.writeUnlock();
        }
    } catch (DdlException e) {
        cleanTabletIdSetForAll(tabletIdSetForAll, olapTable.isLakeTable());
        throw e;
    }
}
/**
 * Replays an ADD PARTITION edit-log entry in the V2 persist format (used by lake
 * tables and LIST partitions): attaches the partition to the table, restores the
 * PartitionInfo entry, and rebuilds the tablet inverted index (skipped on the
 * checkpoint thread, which does not maintain the index).
 */
public void replayAddPartition(PartitionPersistInfoV2 info) throws DdlException {
    Database db = this.getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
        Partition partition = info.getPartition();
        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
        if (info.isTempPartition()) {
            olapTable.addTempPartition(partition);
        } else {
            olapTable.addPartition(partition);
        }
        PartitionType partitionType = partitionInfo.getType();
        if (partitionType == PartitionType.LIST) {
            try {
                ((ListPartitionInfo) partitionInfo).unprotectHandleNewPartitionDesc(
                        info.asListPartitionPersistInfo());
            } catch (AnalysisException e) {
                throw new DdlException(e.getMessage());
            }
        } else if (partitionType == PartitionType.RANGE) {
            ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(
                    info.asRangePartitionPersistInfo());
        } else {
            throw new DdlException("Only support adding partition to range/list partitioned table");
        }
        if (!isCheckpointThread()) {
            // Rebuild inverted-index entries for every tablet of every index.
            TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
            for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                long indexId = index.getId();
                int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(),
                        index.getId(), schemaHash, info.getDataProperty().getStorageMedium());
                for (Tablet tablet : index.getTablets()) {
                    long tabletId = tablet.getId();
                    invertedIndex.addTablet(tabletId, tabletMeta);
                    // Only local tablets carry replicas; lake tablets do not.
                    if (tablet instanceof LocalTablet) {
                        for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                            invertedIndex.addReplica(tabletId, replica);
                        }
                    }
                }
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays an ADD PARTITION edit-log entry in the legacy persist format: attaches
 * the partition, restores the PartitionInfo entry, and rebuilds the tablet
 * inverted index (skipped on the checkpoint thread).
 */
public void replayAddPartition(PartitionPersistInfo info) throws DdlException {
    Database db = this.getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
        Partition partition = info.getPartition();
        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
        if (info.isTempPartition()) {
            olapTable.addTempPartition(partition);
        } else {
            olapTable.addPartition(partition);
        }
        if (partitionInfo.getType() == PartitionType.RANGE) {
            ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(partition.getId(),
                    info.isTempPartition(), info.getRange(), info.getDataProperty(), info.getReplicationNum(),
                    info.isInMemory());
        } else {
            partitionInfo.addPartition(
                    partition.getId(), info.getDataProperty(), info.getReplicationNum(), info.isInMemory());
        }
        if (!isCheckpointThread()) {
            TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
            for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
                long indexId = index.getId();
                int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(),
                        index.getId(), schemaHash, info.getDataProperty().getStorageMedium());
                for (Tablet tablet : index.getTablets()) {
                    long tabletId = tablet.getId();
                    invertedIndex.addTablet(tabletId, tabletMeta);
                    // Guard the cast like the V2 overload does: only LocalTablet has
                    // replicas. The legacy format should never describe lake tablets,
                    // but an unconditional cast would fail with ClassCastException.
                    if (tablet instanceof LocalTablet) {
                        for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                            invertedIndex.addReplica(tabletId, replica);
                        }
                    }
                }
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Replays a DROP PARTITION edit-log entry. Dropping a temp partition is always
 * forced; dropping a normal partition may return tablet ids whose shards must be
 * queued for deletion (lake tables).
 */
public void replayDropPartition(DropPartitionInfo info) {
    Database db = this.getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
        Set<Long> droppedTablets;
        if (info.isTempPartition()) {
            olapTable.dropTempPartition(info.getPartitionName(), true);
            droppedTablets = new HashSet<Long>();
        } else {
            droppedTablets = olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop());
        }
        if (!droppedTablets.isEmpty()) {
            stateMgr.getShardManager().getShardDeleter().addUnusedShardId(droppedTablets);
        }
    } finally {
        db.writeUnlock();
    }
}
// Replays an ERASE PARTITION edit-log entry by delegating to the recycle bin.
public void replayErasePartition(long partitionId) throws DdlException {
    recycleBin.replayErasePartition(partitionId);
}
/**
 * Replays a RECOVER PARTITION edit-log entry: restores the partition from the
 * recycle bin back onto its table, under the database write lock.
 */
public void replayRecoverPartition(RecoverInfo info) {
    Database db = getDb(info.getDbId());
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
        recycleBin.replayRecoverPartition(olapTable, info.getPartitionId());
    } finally {
        db.writeUnlock();
    }
}
// Convenience wrapper: creates a partition using the table's own PartitionInfo.
private Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName,
                                  Long version, Set<Long> tabletIdSet) throws DdlException {
    return createPartitionCommon(db, table, partitionId, partitionName, table.getPartitionInfo(), version,
            tabletIdSet);
}
/**
 * Creates the in-memory Partition with one MaterializedIndex per index meta of the
 * table, creates the physical tablets for each index (lake or local flavor), and
 * attaches non-base indexes as rollups. Ids of all created tablets are collected
 * into {@code tabletIdSet} so the caller can clean up on failure.
 *
 * @param version if non-null, the partition's initial visible version
 */
private Partition createPartitionCommon(Database db, OlapTable table, long partitionId, String partitionName,
                                        PartitionInfo partitionInfo, Long version, Set<Long> tabletIdSet)
        throws DdlException {
    Map<Long, MaterializedIndex> indexMap = new HashMap<>();
    for (long indexId : table.getIndexIdToMeta().keySet()) {
        MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL);
        indexMap.put(indexId, rollup);
    }
    DistributionInfo distributionInfo = table.getDefaultDistributionInfo();
    // The base index becomes the partition's primary index.
    Partition partition =
            new Partition(partitionId, partitionName, indexMap.get(table.getBaseIndexId()), distributionInfo);
    if (version != null) {
        partition.updateVisibleVersion(version);
    }
    short replicationNum = partitionInfo.getReplicationNum(partitionId);
    TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium();
    for (Map.Entry<Long, MaterializedIndex> entry : indexMap.entrySet()) {
        long indexId = entry.getKey();
        MaterializedIndex index = entry.getValue();
        MaterializedIndexMeta indexMeta = table.getIndexIdToMeta().get(indexId);
        TabletMeta tabletMeta =
                new TabletMeta(db.getId(), table.getId(), partitionId, indexId, indexMeta.getSchemaHash(),
                        storageMedium, table.isLakeTable());
        // Tablet creation differs by storage engine: lake tablets are shards,
        // local tablets carry replicas.
        if (table.isLakeTable()) {
            createLakeTablets((LakeTable) table, partitionId, index, distributionInfo, replicationNum, tabletMeta,
                    tabletIdSet);
        } else {
            createOlapTablets(index, Replica.ReplicaState.NORMAL, distributionInfo,
                    partition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet);
        }
        if (index.getId() != table.getBaseIndexId()) {
            partition.createRollupIndex(index);
        }
    }
    return partition;
}
/**
 * Sends create-replica tasks to backends for the given partitions. Large jobs
 * (>= 3 partitions, >= 3 alive backends, >= 500 replicas per backend) are built
 * concurrently; smaller jobs sequentially; no alive backend is an error.
 */
private void buildPartitions(Database db, OlapTable table, List<Partition> partitions) throws DdlException {
    if (partitions.isEmpty()) {
        return;
    }
    int aliveBackends = systemInfoService.getAliveBackendNumber();
    int totalReplicas = 0;
    for (Partition p : partitions) {
        totalReplicas += p.getReplicaCount();
    }
    if (aliveBackends <= 0) {
        throw new DdlException("no alive backend");
    }
    if (partitions.size() >= 3 && aliveBackends >= 3 && totalReplicas >= aliveBackends * 500) {
        LOG.info("creating {} partitions of table {} concurrently", partitions.size(), table.getName());
        buildPartitionsConcurrently(db.getId(), table, partitions, totalReplicas, aliveBackends);
    } else {
        buildPartitionsSequentially(db.getId(), table, partitions, totalReplicas, aliveBackends);
    }
}
/**
 * Returns the largest number of tasks assigned to any single backend,
 * or 0 for an empty task list.
 *
 * Fix: the previous version called {@code Collections.max} on an empty
 * collection when {@code tasks} was empty, throwing NoSuchElementException;
 * a zero count (and hence a zero timeout contribution) is the correct result.
 */
private int countMaxTasksPerBackend(List<CreateReplicaTask> tasks) {
    if (tasks.isEmpty()) {
        return 0;
    }
    Map<Long, Integer> tasksPerBackend = new HashMap<>();
    for (CreateReplicaTask task : tasks) {
        tasksPerBackend.merge(task.getBackendId(), 1, Integer::sum);
    }
    return Collections.max(tasksPerBackend.values());
}
/**
 * Builds partitions in groups sized so that each backend receives roughly 200
 * replicas per batch, sending each group's create-replica tasks and waiting for
 * them before starting the next group. On failure, unfinished tasks of the
 * current group are removed from the agent task queue.
 */
private void buildPartitionsSequentially(long dbId, OlapTable table, List<Partition> partitions, int numReplicas,
                                         int numBackends) throws DdlException {
    int avgReplicasPerPartition = numReplicas / partitions.size();
    // ~200 replicas per backend per group; at least one partition per group.
    int partitionGroupSize = Math.max(1, numBackends * 200 / Math.max(1, avgReplicasPerPartition));
    for (int i = 0; i < partitions.size(); i += partitionGroupSize) {
        int endIndex = Math.min(partitions.size(), i + partitionGroupSize);
        List<CreateReplicaTask> tasks = buildCreateReplicaTasks(dbId, table, partitions.subList(i, endIndex));
        int partitionCount = endIndex - i;
        int indexCountPerPartition = partitions.get(i).getVisibleMaterializedIndicesCount();
        int timeout = Config.tablet_create_timeout_second * countMaxTasksPerBackend(tasks);
        // Cap the per-group timeout so a huge group cannot wait forever.
        int maxTimeout = partitionCount * indexCountPerPartition * Config.max_create_table_timeout_second;
        try {
            sendCreateReplicaTasksAndWaitForFinished(tasks, Math.min(timeout, maxTimeout));
            // On success the list is cleared so the finally-block removes nothing.
            tasks.clear();
        } finally {
            for (CreateReplicaTask task : tasks) {
                AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.CREATE, task.getSignature());
            }
        }
    }
}
/**
 * Builds partitions with a background sender thread: the thread streams
 * create-replica tasks to backends, throttling so that no more than ~200
 * unacknowledged tasks per backend are in flight, while this thread waits on a
 * countdown latch tracking all replicas. On error or timeout the latch is
 * force-counted to zero so both sides unwind, and queued tasks are removed.
 */
private void buildPartitionsConcurrently(long dbId, OlapTable table, List<Partition> partitions, int numReplicas,
                                         int numBackends) throws DdlException {
    // numReplicas >= numBackends * 500 is guaranteed by the caller, so this is > 0.
    int timeout = numReplicas / numBackends * Config.tablet_create_timeout_second;
    int numIndexes = partitions.stream().mapToInt(Partition::getVisibleMaterializedIndicesCount).sum();
    int maxTimeout = numIndexes * Config.max_create_table_timeout_second;
    MarkedCountDownLatch<Long, Long> countDownLatch = new MarkedCountDownLatch<>(numReplicas);
    Thread t = new Thread(() -> {
        // backendId -> signatures of tasks queued there, for cleanup on failure.
        Map<Long, List<Long>> taskSignatures = new HashMap<>();
        try {
            int numFinishedTasks;
            int numSendedTasks = 0;
            for (Partition partition : partitions) {
                // Stop sending as soon as any task has reported failure.
                if (!countDownLatch.getStatus().ok()) {
                    break;
                }
                List<CreateReplicaTask> tasks = buildCreateReplicaTasks(dbId, table, partition);
                for (CreateReplicaTask task : tasks) {
                    List<Long> signatures =
                            taskSignatures.computeIfAbsent(task.getBackendId(), k -> new ArrayList<>());
                    signatures.add(task.getSignature());
                }
                sendCreateReplicaTasks(tasks, countDownLatch);
                numSendedTasks += tasks.size();
                numFinishedTasks = numReplicas - (int) countDownLatch.getCount();
                // Back-pressure: keep at most ~200 outstanding tasks per backend.
                while (numSendedTasks - numFinishedTasks > 200 * numBackends) {
                    ThreadUtil.sleepAtLeastIgnoreInterrupts(100);
                    numFinishedTasks = numReplicas - (int) countDownLatch.getCount();
                }
            }
            countDownLatch.await();
            // On success there is nothing to clean up in the finally-block.
            if (countDownLatch.getStatus().ok()) {
                taskSignatures.clear();
            }
        } catch (Exception e) {
            LOG.warn(e);
            countDownLatch.countDownToZero(new Status(TStatusCode.UNKNOWN, e.toString()));
        } finally {
            // Remove any tasks that were queued but did not complete successfully.
            for (Map.Entry<Long, List<Long>> entry : taskSignatures.entrySet()) {
                for (Long signature : entry.getValue()) {
                    AgentTaskQueue.removeTask(entry.getKey(), TTaskType.CREATE, signature);
                }
            }
        }
    }, "partition-build");
    t.start();
    try {
        waitForFinished(countDownLatch, Math.min(timeout, maxTimeout));
    } catch (Exception e) {
        // Unblock the sender thread before propagating.
        countDownLatch.countDownToZero(new Status(TStatusCode.UNKNOWN, e.getMessage()));
        throw e;
    }
}
// Collects create-replica tasks for a batch of partitions by delegating to the
// per-partition overload.
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, List<Partition> partitions)
        throws DdlException {
    List<CreateReplicaTask> result = new ArrayList<>();
    for (Partition p : partitions) {
        result.addAll(buildCreateReplicaTasks(dbId, table, p));
    }
    return result;
}
// Collects create-replica tasks for every visible materialized index of one
// partition; pre-sized to the partition's replica count.
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, Partition partition)
        throws DdlException {
    List<CreateReplicaTask> result = new ArrayList<>((int) partition.getReplicaCount());
    for (MaterializedIndex idx : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) {
        result.addAll(buildCreateReplicaTasks(dbId, table, partition, idx));
    }
    return result;
}
/**
 * Builds one CreateReplicaTask per physical replica of a materialized index.
 * Lake tablets produce one task addressed to the tablet's primary backend with
 * tablet type TABLET_TYPE_LAKE; local tablets produce one task per replica
 * addressed to that replica's backend with the partition's configured type.
 */
private List<CreateReplicaTask> buildCreateReplicaTasks(long dbId, OlapTable table, Partition partition,
                                                        MaterializedIndex index) throws DdlException {
    List<CreateReplicaTask> tasks = new ArrayList<>((int) index.getReplicaCount());
    MaterializedIndexMeta indexMeta = table.getIndexMetaByIndexId(index.getId());
    for (Tablet tablet : index.getTablets()) {
        if (table.isLakeTable()) {
            long primaryBackendId = -1;
            try {
                primaryBackendId = ((LakeTablet) tablet).getPrimaryBackendId();
            } catch (UserException e) {
                throw new DdlException(e.getMessage());
            }
            CreateReplicaTask task = new CreateReplicaTask(
                    primaryBackendId,
                    dbId,
                    table.getId(),
                    partition.getId(),
                    index.getId(),
                    tablet.getId(),
                    indexMeta.getShortKeyColumnCount(),
                    indexMeta.getSchemaHash(),
                    partition.getVisibleVersion(),
                    indexMeta.getKeysType(),
                    indexMeta.getStorageType(),
                    table.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(),
                    indexMeta.getSchema(),
                    table.getBfColumns(),
                    table.getBfFpp(),
                    null,
                    table.getIndexes(),
                    table.getPartitionInfo().getIsInMemory(partition.getId()),
                    table.enablePersistentIndex(),
                    TTabletType.TABLET_TYPE_LAKE,
                    table.getCompressionType());
            tasks.add(task);
        } else {
            // One task per replica for local tablets.
            for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                CreateReplicaTask task = new CreateReplicaTask(
                        replica.getBackendId(),
                        dbId,
                        table.getId(),
                        partition.getId(),
                        index.getId(),
                        tablet.getId(),
                        indexMeta.getShortKeyColumnCount(),
                        indexMeta.getSchemaHash(),
                        partition.getVisibleVersion(),
                        indexMeta.getKeysType(),
                        indexMeta.getStorageType(),
                        table.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(),
                        indexMeta.getSchema(),
                        table.getBfColumns(),
                        table.getBfFpp(),
                        null,
                        table.getIndexes(),
                        table.getPartitionInfo().getIsInMemory(partition.getId()),
                        table.enablePersistentIndex(),
                        table.getPartitionInfo().getTabletType(partition.getId()),
                        table.getCompressionType());
                tasks.add(task);
            }
        }
    }
    return tasks;
}
// Sends the given create-replica tasks to their backends and blocks until all
// complete or the timeout (seconds) elapses; throws DdlException on failure.
private void sendCreateReplicaTasksAndWaitForFinished(List<CreateReplicaTask> tasks, long timeout)
        throws DdlException {
    MarkedCountDownLatch<Long, Long> countDownLatch = new MarkedCountDownLatch<>(tasks.size());
    sendCreateReplicaTasks(tasks, countDownLatch);
    waitForFinished(countDownLatch, timeout);
}
/**
 * Groups the tasks into one AgentBatchTask per backend, marks each task on the
 * latch (backendId/tabletId), then queues and submits each batch.
 *
 * Cleanup: replaced the manual get/null-check/put grouping with
 * {@code Map.computeIfAbsent}, and iterates the map values directly since the
 * keys are not needed when submitting.
 */
private void sendCreateReplicaTasks(List<CreateReplicaTask> tasks,
                                    MarkedCountDownLatch<Long, Long> countDownLatch) {
    HashMap<Long, AgentBatchTask> batchTaskMap = new HashMap<>();
    for (CreateReplicaTask task : tasks) {
        task.setLatch(countDownLatch);
        countDownLatch.addMark(task.getBackendId(), task.getTabletId());
        AgentBatchTask batchTask = batchTaskMap.computeIfAbsent(task.getBackendId(), k -> new AgentBatchTask());
        batchTask.addTask(task);
    }
    for (AgentBatchTask batchTask : batchTaskMap.values()) {
        AgentTaskQueue.addBatchTask(batchTask);
        AgentTaskExecutor.submit(batchTask);
    }
}
/**
 * Blocks until the latch reaches zero or {@code timeout} seconds elapse.
 * Throws DdlException when any task reported failure or the wait timed out
 * (listing up to three unfinished tablet/backend pairs).
 *
 * Fix: the InterruptedException handler previously swallowed the interrupt;
 * it now restores the thread's interrupt status per standard practice.
 * NOTE(review): after interruption the method still returns normally (callers
 * proceed as if successful) — preserved existing behavior; confirm intended.
 */
private void waitForFinished(MarkedCountDownLatch<Long, Long> countDownLatch, long timeout) throws DdlException {
    try {
        if (countDownLatch.await(timeout, TimeUnit.SECONDS)) {
            if (!countDownLatch.getStatus().ok()) {
                String errMsg = "fail to create tablet: " + countDownLatch.getStatus().getErrorMsg();
                LOG.warn(errMsg);
                throw new DdlException(errMsg);
            }
        } else {
            // Timed out: report a sample of the unfinished replicas for diagnosis.
            List<Map.Entry<Long, Long>> unfinishedMarks = countDownLatch.getLeftMarks();
            List<Map.Entry<Long, Long>> firstThree =
                    unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3));
            StringBuilder sb = new StringBuilder("fail to create tablet: timed out. unfinished replicas");
            sb.append("(").append(firstThree.size()).append("/").append(unfinishedMarks.size()).append("): ");
            for (Map.Entry<Long, Long> mark : firstThree) {
                sb.append(mark.getValue());
                sb.append('(');
                Backend backend = stateMgr.getClusterInfo().getBackend(mark.getKey());
                sb.append(backend != null ? backend.getHost() : "N/A");
                sb.append(") ");
            }
            sb.append(" timeout=").append(timeout).append("s");
            String errMsg = sb.toString();
            LOG.warn(errMsg);
            // Force the latch to zero so any sender thread waiting on it unwinds.
            countDownLatch.countDownToZero(new Status(TStatusCode.TIMEOUT, "timed out"));
            throw new DdlException(errMsg);
        }
    } catch (InterruptedException e) {
        LOG.warn(e);
        countDownLatch.countDownToZero(new Status(TStatusCode.CANCELLED, "cancelled"));
        // Restore the interrupt flag so callers up the stack can observe it.
        Thread.currentThread().interrupt();
    }
}
/*
* generate and check columns' order and key's existence
*/
/**
 * Validates the column list for an OLAP table: it must be non-empty, contain at
 * least one key column, and all key columns must precede all value columns.
 */
private void validateColumns(List<Column> columns) throws DdlException {
    if (columns.isEmpty()) {
        ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_COLUMNS);
    }
    boolean seenValueColumn = false;
    boolean seenKeyColumn = false;
    for (Column column : columns) {
        if (!column.isKey()) {
            seenValueColumn = true;
            continue;
        }
        // A key column after a value column violates the required ordering.
        if (seenValueColumn) {
            ErrorReport.reportDdlException(ErrorCode.ERR_OLAP_KEY_MUST_BEFORE_VALUE);
        }
        seenKeyColumn = true;
    }
    if (!seenKeyColumn) {
        ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_KEYS);
    }
}
// Injects the colocate table index (used e.g. by unit tests / initialization).
public void setColocateTableIndex(ColocateTableIndex colocateTableIndex) {
    this.colocateTableIndex = colocateTableIndex;
}
// Returns the colocate table index managed by this instance.
public ColocateTableIndex getColocateTableIndex() {
    return colocateTableIndex;
}
private void createOlapOrLakeTable(Database db, CreateTableStmt stmt) throws DdlException {
String tableName = stmt.getTableName();
LOG.debug("begin create olap table: {}", tableName);
List<Column> baseSchema = stmt.getColumns();
validateColumns(baseSchema);
PartitionDesc partitionDesc = stmt.getPartitionDesc();
PartitionInfo partitionInfo;
Map<String, Long> partitionNameToId = Maps.newHashMap();
if (partitionDesc != null) {
if (partitionDesc instanceof RangePartitionDesc) {
RangePartitionDesc rangePartitionDesc = (RangePartitionDesc) partitionDesc;
for (SingleRangePartitionDesc desc : rangePartitionDesc.getSingleRangePartitionDescs()) {
long partitionId = getNextId();
partitionNameToId.put(desc.getPartitionName(), partitionId);
}
} else if (partitionDesc instanceof ListPartitionDesc) {
ListPartitionDesc listPartitionDesc = (ListPartitionDesc) partitionDesc;
listPartitionDesc.findAllPartitionNames()
.forEach(partitionName -> partitionNameToId.put(partitionName, getNextId()));
} else {
throw new DdlException("Currently only support range or list partition with engine type olap");
}
partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId, false);
} else {
if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(stmt.getProperties())) {
throw new DdlException("Only support dynamic partition properties on range partition table");
}
long partitionId = getNextId();
partitionNameToId.put(tableName, partitionId);
partitionInfo = new SinglePartitionInfo();
}
KeysDesc keysDesc = stmt.getKeysDesc();
Preconditions.checkNotNull(keysDesc);
KeysType keysType = keysDesc.getKeysType();
DistributionDesc distributionDesc = stmt.getDistributionDesc();
Preconditions.checkNotNull(distributionDesc);
DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, stmt.getProperties());
LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount);
TableIndexes indexes = new TableIndexes(stmt.getIndexes());
Map<String, String> properties = stmt.getProperties();
long tableId = GlobalStateMgr.getCurrentState().getNextId();
OlapTable olapTable = null;
if (stmt.isExternal()) {
olapTable = new ExternalOlapTable(db.getId(), tableId, tableName, baseSchema, keysType, partitionInfo,
distributionInfo, indexes, properties);
} else {
if (stmt.isLakeEngine()) {
olapTable = new LakeTable(tableId, tableName, baseSchema, keysType, partitionInfo,
distributionInfo, indexes);
boolean enableStorageCache = PropertyAnalyzer.analyzeBooleanProp(
properties, PropertyAnalyzer.PROPERTIES_ENABLE_STORAGE_CACHE, false);
long storageCacheTtlS = 0;
try {
storageCacheTtlS = PropertyAnalyzer.analyzeLongProp(
properties, PropertyAnalyzer.PROPERTIES_STORAGE_CACHE_TTL, 0);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
if (storageCacheTtlS < -1) {
throw new DdlException("Storage cache ttl should not be less than -1");
}
if (!enableStorageCache && storageCacheTtlS != 0) {
throw new DdlException("Storage cache ttl should be 0 when cache is disabled");
}
if (enableStorageCache && storageCacheTtlS == 0) {
storageCacheTtlS = Config.tablet_sched_storage_cooldown_second;
}
boolean allowAsyncWriteBack = PropertyAnalyzer.analyzeBooleanProp(
properties, PropertyAnalyzer.PROPERTIES_ALLOW_ASYNC_WRITE_BACK, false);
if (!enableStorageCache && allowAsyncWriteBack) {
throw new DdlException("storage allow_async_write_back can't be enabled when cache is disabled");
}
ShardStorageInfo shardStorageInfo = stateMgr.getStarOSAgent().getServiceShardStorageInfo();
((LakeTable) olapTable)
.setStorageInfo(shardStorageInfo, enableStorageCache, storageCacheTtlS, allowAsyncWriteBack);
} else {
Preconditions.checkState(stmt.isOlapEngine());
olapTable = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo,
distributionInfo, indexes);
}
}
olapTable.setComment(stmt.getComment());
long baseIndexId = getNextId();
olapTable.setBaseIndexId(baseIndexId);
Set<String> bfColumns = null;
double bfFpp = 0;
try {
bfColumns = PropertyAnalyzer.analyzeBloomFilterColumns(properties, baseSchema,
olapTable.getKeysType() == KeysType.PRIMARY_KEYS);
if (bfColumns != null && bfColumns.isEmpty()) {
bfColumns = null;
}
bfFpp = PropertyAnalyzer.analyzeBloomFilterFpp(properties);
if (bfColumns != null && bfFpp == 0) {
bfFpp = FeConstants.default_bloom_filter_fpp;
} else if (bfColumns == null) {
bfFpp = 0;
}
olapTable.setBloomFilterInfo(bfColumns, bfFpp);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
short replicationNum = FeConstants.default_replication_num;
try {
boolean isReplicationNumSet =
properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM);
replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum);
if (isReplicationNumSet) {
olapTable.setReplicationNum(replicationNum);
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
boolean isInMemory =
PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_INMEMORY, false);
olapTable.setIsInMemory(isInMemory);
boolean enablePersistentIndex =
PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX,
false);
olapTable.setEnablePersistentIndex(enablePersistentIndex);
if (olapTable.getKeysType() == KeysType.PRIMARY_KEYS && olapTable.enablePersistentIndex()) {
if (!olapTable.checkPersistentIndex()) {
throw new DdlException("PrimaryKey table using persistent index don't support varchar(char) as key so far," +
" and key length should be no more than 64 Bytes");
}
}
TTabletType tabletType = TTabletType.TABLET_TYPE_DISK;
try {
tabletType = PropertyAnalyzer.analyzeTabletType(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
long partitionId = partitionNameToId.get(tableName);
DataProperty dataProperty = null;
try {
boolean hasMedium = false;
if (properties != null) {
hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
}
dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, DataProperty.DEFAULT_DATA_PROPERTY);
if (hasMedium) {
olapTable.setStorageMedium(dataProperty.getStorageMedium());
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
Preconditions.checkNotNull(dataProperty);
partitionInfo.setDataProperty(partitionId, dataProperty);
partitionInfo.setReplicationNum(partitionId, replicationNum);
partitionInfo.setIsInMemory(partitionId, isInMemory);
partitionInfo.setTabletType(partitionId, tabletType);
partitionInfo.setStorageInfo(partitionId, olapTable.getTableProperty().getStorageInfo());
}
try {
String colocateGroup = PropertyAnalyzer.analyzeColocate(properties);
if (!Strings.isNullOrEmpty(colocateGroup)) {
String fullGroupName = db.getId() + "_" + colocateGroup;
ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName);
if (groupSchema != null) {
groupSchema.checkColocateSchema(olapTable);
}
colocateTableIndex.addTableToGroup(db.getId(), olapTable, colocateGroup,
null /* generate group id inside */);
olapTable.setColocateGroup(colocateGroup);
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
TStorageType baseIndexStorageType = null;
try {
baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
Preconditions.checkNotNull(baseIndexStorageType);
int schemaVersion = 0;
try {
schemaVersion = PropertyAnalyzer.analyzeSchemaVersion(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
int schemaHash = Util.schemaHash(schemaVersion, baseSchema, bfColumns, bfFpp);
olapTable.setIndexMeta(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash,
shortKeyColumnCount, baseIndexStorageType, keysType);
for (AlterClause alterClause : stmt.getRollupAlterClauseList()) {
AddRollupClause addRollupClause = (AddRollupClause) alterClause;
Long baseRollupIndex = olapTable.getIndexIdByName(tableName);
TStorageType rollupIndexStorageType = null;
try {
rollupIndexStorageType = PropertyAnalyzer.analyzeStorageType(addRollupClause.getProperties());
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
Preconditions.checkNotNull(rollupIndexStorageType);
List<Column> rollupColumns = stateMgr.getRollupHandler().checkAndPrepareMaterializedView(addRollupClause,
olapTable, baseRollupIndex, false);
short rollupShortKeyColumnCount =
GlobalStateMgr.calcShortKeyColumnCount(rollupColumns, alterClause.getProperties());
int rollupSchemaHash = Util.schemaHash(schemaVersion, rollupColumns, bfColumns, bfFpp);
long rollupIndexId = getNextId();
olapTable.setIndexMeta(rollupIndexId, addRollupClause.getRollupName(), rollupColumns, schemaVersion,
rollupSchemaHash, rollupShortKeyColumnCount, rollupIndexStorageType, keysType);
}
Long version = null;
try {
version = PropertyAnalyzer.analyzeVersionInfo(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
Preconditions.checkNotNull(version);
TStorageFormat storageFormat = TStorageFormat.DEFAULT;
try {
storageFormat = PropertyAnalyzer.analyzeStorageFormat(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
olapTable.setStorageFormat(storageFormat);
TCompressionType compressionType = TCompressionType.LZ4_FRAME;
try {
compressionType = PropertyAnalyzer.analyzeCompressionType(properties);
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
olapTable.setCompressionType(compressionType);
Set<Long> tabletIdSet = new HashSet<Long>();
boolean createTblSuccess = false;
boolean addToColocateGroupSuccess = false;
try {
if (olapTable.isOlapOrLakeTable()) {
if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
long partitionId = partitionNameToId.get(tableName);
Partition partition = createPartition(db, olapTable, partitionId, tableName, version, tabletIdSet);
buildPartitions(db, olapTable, Collections.singletonList(partition));
olapTable.addPartition(partition);
} else if (partitionInfo.getType() == PartitionType.RANGE
|| partitionInfo.getType() == PartitionType.LIST) {
try {
boolean hasMedium = false;
if (properties != null) {
hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
}
DataProperty dataProperty = PropertyAnalyzer.analyzeDataProperty(properties,
DataProperty.DEFAULT_DATA_PROPERTY);
DynamicPartitionUtil
.checkAndSetDynamicPartitionBuckets(properties, distributionDesc.getBuckets());
DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(olapTable, properties);
if (olapTable.dynamicPartitionExists() && olapTable.getColocateGroup() != null) {
HashDistributionInfo info = (HashDistributionInfo) distributionInfo;
if (info.getBucketNum() !=
olapTable.getTableProperty().getDynamicPartitionProperty().getBuckets()) {
throw new DdlException("dynamic_partition.buckets should equal the distribution buckets"
+ " if creating a colocate table");
}
}
if (hasMedium) {
olapTable.setStorageMedium(dataProperty.getStorageMedium());
}
if (properties != null && !properties.isEmpty()) {
throw new DdlException("Unknown properties: " + properties);
}
} catch (AnalysisException e) {
throw new DdlException(e.getMessage());
}
List<Partition> partitions = new ArrayList<>(partitionNameToId.size());
for (Map.Entry<String, Long> entry : partitionNameToId.entrySet()) {
Partition partition = createPartition(db, olapTable, entry.getValue(), entry.getKey(), version,
tabletIdSet);
partitions.add(partition);
}
buildPartitions(db, olapTable, partitions);
for (Partition partition : partitions) {
olapTable.addPartition(partition);
}
} else {
throw new DdlException("Unsupported partition method: " + partitionInfo.getType().name());
}
}
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
if (getDb(db.getId()) == null) {
throw new DdlException("database has been dropped when creating table");
}
createTblSuccess = db.createTableWithLock(olapTable, false);
if (!createTblSuccess) {
if (!stmt.isSetIfNotExists()) {
ErrorReport
.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
} else {
LOG.info("Create table[{}] which already exists", tableName);
return;
}
}
} finally {
unlock();
}
if (colocateTableIndex.isColocateTable(tableId)) {
ColocateTableIndex.GroupId groupId = colocateTableIndex.getGroup(tableId);
List<List<Long>> backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
ColocatePersistInfo info =
ColocatePersistInfo.createForAddTable(groupId, tableId, backendsPerBucketSeq);
editLog.logColocateAddTable(info);
addToColocateGroupSuccess = true;
}
LOG.info("Successfully create table[{};{}]", tableName, tableId);
DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), olapTable);
stateMgr.getDynamicPartitionScheduler().createOrUpdateRuntimeInfo(
tableName, DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime());
} finally {
if (!createTblSuccess) {
for (Long tabletId : tabletIdSet) {
GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
}
if (olapTable.isLakeTable()) {
stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
editLog.logAddUnusedShard(tabletIdSet);
}
}
if (colocateTableIndex.isColocateTable(tableId) && !addToColocateGroupSuccess) {
colocateTableIndex.removeTable(tableId);
}
}
}
/**
 * Creates an external MySQL table in {@code db} and registers it under the
 * global metastore lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped
 *                      concurrently, or the table already exists without
 *                      IF NOT EXISTS
 */
private void createMysqlTable(Database db, CreateTableStmt stmt) throws DdlException {
    String tableName = stmt.getTableName();
    long tableId = GlobalStateMgr.getCurrentState().getNextId();
    MysqlTable mysqlTable = new MysqlTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
    mysqlTable.setComment(stmt.getComment());
    // Serialize registration against other metadata operations.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        // Re-check the database still exists now that the lock is held.
        if (getDb(db.getId()) == null) {
            throw new DdlException("database has been dropped when creating table");
        }
        if (!db.createTableWithLock(mysqlTable, false)) {
            if (stmt.isSetIfNotExists()) {
                // IF NOT EXISTS: an existing table is not an error.
                LOG.info("Create table[{}] which already exists", tableName);
                return;
            }
            ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
        }
    } finally {
        unlock();
    }
    LOG.info("Successfully create table[{}-{}]", tableName, tableId);
}
/**
 * Creates an Elasticsearch external table in {@code db}.
 *
 * <p>Unlike the other external table types in this file, an ES table may carry
 * partition info; when no partition scheme is declared, a single partition named
 * after the table is synthesized.
 *
 * @throws DdlException if the globalStateMgr lock cannot be taken, the database
 *                      was dropped concurrently, or the table already exists
 *                      without IF NOT EXISTS
 */
private void createEsTable(Database db, CreateTableStmt stmt) throws DdlException {
String tableName = stmt.getTableName();
List<Column> baseSchema = stmt.getColumns();
validateColumns(baseSchema);
// Use the declared partition scheme if present; otherwise fall back to a single
// partition keyed by the table name.
PartitionDesc partitionDesc = stmt.getPartitionDesc();
PartitionInfo partitionInfo = null;
Map<String, Long> partitionNameToId = Maps.newHashMap();
if (partitionDesc != null) {
partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId, false);
} else {
long partitionId = getNextId();
// Single unnamed partition: reuse the table name as the partition name.
partitionNameToId.put(tableName, partitionId);
partitionInfo = new SinglePartitionInfo();
}
long tableId = GlobalStateMgr.getCurrentState().getNextId();
EsTable esTable = new EsTable(tableId, tableName, baseSchema, stmt.getProperties(), partitionInfo);
esTable.setComment(stmt.getComment());
// Serialize registration against other metadata operations.
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
// Re-check the database still exists now that the lock is held.
if (getDb(db.getId()) == null) {
throw new DdlException("database has been dropped when creating table");
}
if (!db.createTableWithLock(esTable, false)) {
if (!stmt.isSetIfNotExists()) {
ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
} else {
// IF NOT EXISTS: an existing table is not an error.
LOG.info("create table[{}] which already exists", tableName);
return;
}
}
} finally {
unlock();
}
LOG.info("successfully create table{} with id {}", tableName, tableId);
}
/**
 * Creates an external Hive table in {@code db}. When the statement carries no
 * comment, the table's partition columns are used as the comment instead.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped
 *                      concurrently, or the table already exists without
 *                      IF NOT EXISTS
 */
private void createHiveTable(Database db, CreateTableStmt stmt) throws DdlException {
    String tableName = stmt.getTableName();
    long tableId = getNextId();
    HiveTable hiveTable = new HiveTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
    String comment = stmt.getComment();
    if (Strings.isNullOrEmpty(comment)) {
        // No user comment: describe the partitioning instead.
        hiveTable.setComment("PARTITION BY (" + String.join(", ", hiveTable.getPartitionColumnNames()) + ")");
    } else {
        hiveTable.setComment(comment);
    }
    // Serialize registration against other metadata operations.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        // Re-check the database still exists now that the lock is held.
        if (getDb(db.getId()) == null) {
            throw new DdlException("database has been dropped when creating table");
        }
        if (!db.createTableWithLock(hiveTable, false)) {
            if (stmt.isSetIfNotExists()) {
                // IF NOT EXISTS: an existing table is not an error.
                LOG.info("create table[{}] which already exists", tableName);
                return;
            }
            ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create table[{}-{}]", tableName, tableId);
}
/**
 * Creates an external Iceberg table in {@code db} under the global metastore lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped
 *                      concurrently, or the table already exists without
 *                      IF NOT EXISTS
 */
private void createIcebergTable(Database db, CreateTableStmt stmt) throws DdlException {
    String tableName = stmt.getTableName();
    long tableId = getNextId();
    IcebergTable icebergTable = new IcebergTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
    String comment = stmt.getComment();
    if (!Strings.isNullOrEmpty(comment)) {
        icebergTable.setComment(comment);
    }
    // Serialize registration against other metadata operations.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        // Re-check the database still exists now that the lock is held.
        if (getDb(db.getId()) == null) {
            throw new DdlException("database has been dropped when creating table");
        }
        if (!db.createTableWithLock(icebergTable, false)) {
            if (stmt.isSetIfNotExists()) {
                // IF NOT EXISTS: an existing table is not an error.
                LOG.info("create table[{}] which already exists", tableName);
                return;
            }
            ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create table[{}-{}]", tableName, tableId);
}
/**
 * Creates an external Hudi table in {@code db}.
 *
 * <p>Hudi writes five bookkeeping meta fields into every record; any of them
 * missing from the user's column list are appended here as nullable STRING
 * columns so the table schema matches the underlying Hudi files. Note this
 * mutates the column list returned by {@code stmt.getColumns()}.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped
 *                      concurrently, or the table already exists without
 *                      IF NOT EXISTS
 */
private void createHudiTable(Database db, CreateTableStmt stmt) throws DdlException {
String tableName = stmt.getTableName();
List<Column> columns = stmt.getColumns();
// The full set of Hudi meta fields that must be present in the schema.
Set<String> metaFields = new HashSet<>(Arrays.asList(
HoodieRecord.COMMIT_TIME_METADATA_FIELD,
HoodieRecord.COMMIT_SEQNO_METADATA_FIELD,
HoodieRecord.RECORD_KEY_METADATA_FIELD,
HoodieRecord.PARTITION_PATH_METADATA_FIELD,
HoodieRecord.FILENAME_METADATA_FIELD));
// Keep only the meta fields the user did NOT declare, then append them.
Set<String> includedMetaFields = columns.stream().map(Column::getName)
.filter(metaFields::contains).collect(Collectors.toSet());
metaFields.removeAll(includedMetaFields);
metaFields.forEach(f -> columns.add(new Column(f, Type.STRING, true)));
long tableId = getNextId();
HudiTable hudiTable = new HudiTable(tableId, tableName, columns, stmt.getProperties());
// Default the comment to the partition columns when the user gave none.
String partitionCmt = "PARTITION BY (" + String.join(", ", hudiTable.getPartitionColumnNames()) + ")";
if (Strings.isNullOrEmpty(stmt.getComment())) {
hudiTable.setComment(partitionCmt);
} else {
hudiTable.setComment(stmt.getComment());
}
// Serialize registration against other metadata operations.
if (!tryLock(false)) {
throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
}
try {
// NOTE(review): this re-check uses the full-name lookup while the sibling
// create* methods use getDb(db.getId()) — confirm whether the difference is
// intentional.
if (getDb(db.getFullName()) == null) {
throw new DdlException("Database has been dropped when creating table");
}
if (!db.createTableWithLock(hudiTable, false)) {
if (!stmt.isSetIfNotExists()) {
ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
} else {
// IF NOT EXISTS: an existing table is not an error.
LOG.info("Create table[{}] which already exists", tableName);
return;
}
}
} finally {
unlock();
}
LOG.info("Successfully create table[{}-{}]", tableName, tableId);
}
/**
 * Creates an external JDBC table in {@code db} under the global metastore lock.
 *
 * @throws DdlException if the lock cannot be taken, the database was dropped
 *                      concurrently, or the table already exists without
 *                      IF NOT EXISTS
 */
private void createJDBCTable(Database db, CreateTableStmt stmt) throws DdlException {
    String tableName = stmt.getTableName();
    long tableId = getNextId();
    JDBCTable jdbcTable = new JDBCTable(tableId, tableName, stmt.getColumns(), stmt.getProperties());
    // Serialize registration against other metadata operations.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        // Re-check the database still exists now that the lock is held.
        if (getDb(db.getFullName()) == null) {
            throw new DdlException("database has been dropped when creating table");
        }
        if (!db.createTableWithLock(jdbcTable, false)) {
            if (stmt.isSetIfNotExists()) {
                // IF NOT EXISTS: an existing table is not an error.
                LOG.info("create table [{}] which already exists", tableName);
                return;
            }
            ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create jdbc table[{}-{}]", tableName, tableId);
}
/**
 * Replays a create-table edit log entry.
 *
 * <p>Registers {@code table} in its database and, for OLAP/lake tables, rebuilds
 * the tablet inverted index (plus replica entries for local OLAP tablets) and
 * re-registers dynamic partition scheduling. The index rebuild is skipped on the
 * checkpoint thread — presumably because the checkpoint image does not need the
 * in-memory index; confirm against the other replay paths.
 */
public void replayCreateTable(String dbName, Table table) {
Database db = this.fullNameToDb.get(dbName);
db.createTableWithLock(table, true);
if (!isCheckpointThread()) {
if (table.isOlapOrLakeTable()) {
// Rebuild a TabletMeta entry for every tablet of every index of every partition.
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
OlapTable olapTable = (OlapTable) table;
long dbId = db.getId();
long tableId = table.getId();
for (Partition partition : olapTable.getAllPartitions()) {
long partitionId = partition.getId();
TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
partitionId).getStorageMedium();
for (MaterializedIndex mIndex : partition
.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
long indexId = mIndex.getId();
int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, schemaHash, medium,
table.isLakeTable());
for (Tablet tablet : mIndex.getTablets()) {
long tabletId = tablet.getId();
invertedIndex.addTablet(tabletId, tabletMeta);
// Only local OLAP tablets carry replicas to register; lake tablets do not.
if (table.isOlapTable()) {
for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
invertedIndex.addReplica(tabletId, replica);
}
}
}
}
}
DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(dbId, olapTable);
}
}
}
/**
 * Replays a create-materialized-view edit log entry: registers the MV in its
 * database and rebuilds tablet and replica entries in the inverted index.
 * Skipped on the checkpoint thread, like the table replay path.
 */
public void replayCreateMaterializedView(String dbName, MaterializedView materializedView) {
Database db = this.fullNameToDb.get(dbName);
db.createMaterializedWithLock(materializedView, true);
if (!isCheckpointThread()) {
// Rebuild a TabletMeta entry for every tablet of every index of every partition.
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
long dbId = db.getId();
long mvId = materializedView.getId();
for (Partition partition : materializedView.getAllPartitions()) {
long partitionId = partition.getId();
TStorageMedium medium = materializedView.getPartitionInfo().getDataProperty(
partitionId).getStorageMedium();
for (MaterializedIndex mIndex : partition.getMaterializedIndices(
MaterializedIndex.IndexExtState.ALL)) {
long indexId = mIndex.getId();
int schemaHash = materializedView.getSchemaHashByIndexId(indexId);
TabletMeta tabletMeta = new TabletMeta(dbId, mvId, partitionId, indexId, schemaHash, medium);
for (Tablet tablet : mIndex.getTablets()) {
long tabletId = tablet.getId();
invertedIndex.addTablet(tabletId, tabletMeta);
// Only local tablets carry replicas to register.
if (tablet instanceof LocalTablet) {
for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
invertedIndex.addReplica(tabletId, replica);
}
}
}
}
}
}
}
/**
 * Creates the tablets of one materialized index of a lake-table partition.
 *
 * <p>One StarOS shard is allocated per bucket and each shard is wrapped in a
 * {@link LakeTablet}. The partition's storage-cache settings are folded into the
 * shard storage info before shard creation. Only HASH distribution is supported.
 *
 * @param tabletIdSet collects the created tablet ids so the caller can clean up
 *                    on failure
 * @throws DdlException if the distribution type is not HASH
 */
private void createLakeTablets(LakeTable table, long partitionId, MaterializedIndex index,
DistributionInfo distributionInfo, short replicationNum, TabletMeta tabletMeta,
Set<Long> tabletIdSet)
throws DdlException {
// replicationNum is only sanity-checked here and not used below — replica
// management for lake tablets is presumably handled by StarOS; confirm.
Preconditions.checkArgument(replicationNum > 0);
DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH) {
throw new DdlException("Unknown distribution type: " + distributionInfoType);
}
// Carry the partition's cache configuration into the shard storage info.
PartitionInfo partitionInfo = table.getPartitionInfo();
StorageInfo partitionStorageInfo = partitionInfo.getStorageInfo(partitionId);
CacheInfo cacheInfo = CacheInfo.newBuilder().setEnableCache(partitionStorageInfo.isEnableStorageCache())
.setTtlSeconds(partitionStorageInfo.getStorageCacheTtlS())
.setAllowAsyncWriteBack(partitionStorageInfo.isAllowAsyncWriteBack())
.build();
ShardStorageInfo shardStorageInfo = ShardStorageInfo.newBuilder(table.getShardStorageInfo())
.setCacheInfo(cacheInfo).build();
// One shard per bucket; each shard becomes one LakeTablet.
int bucketNum = distributionInfo.getBucketNum();
List<Long> shardIds = stateMgr.getStarOSAgent().createShards(bucketNum, shardStorageInfo);
for (long shardId : shardIds) {
Tablet tablet = new LakeTablet(shardId);
index.addTablet(tablet, tabletMeta);
tabletIdSet.add(tablet.getId());
}
}
/**
 * Creates the local tablets (and their replicas) of one materialized index.
 *
 * <p>For colocate tables, the colocate group's recorded backend-per-bucket
 * sequence is reused so bucket {@code i} of this table lands on the same
 * backends as bucket {@code i} of its sibling tables. If no sequence exists yet
 * (non-colocate table, or the first table of its group), backends are chosen
 * freshly per bucket and — for colocate groups — the chosen sequence is
 * persisted to the edit log.
 *
 * @param tabletIdSet collects the created tablet ids so the caller can clean up
 *                    on failure
 * @throws DdlException if the distribution type is not HASH or not enough
 *                      backends can be found
 */
private void createOlapTablets(MaterializedIndex index, Replica.ReplicaState replicaState,
DistributionInfo distributionInfo, long version, short replicationNum,
TabletMeta tabletMeta, Set<Long> tabletIdSet) throws DdlException {
Preconditions.checkArgument(replicationNum > 0);
DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH) {
throw new DdlException("Unknown distribution type: " + distributionInfoType);
}
// Load the colocate group's existing bucket sequence, if any.
List<List<Long>> backendsPerBucketSeq = null;
ColocateTableIndex.GroupId groupId = null;
if (colocateTableIndex.isColocateTable(tabletMeta.getTableId())) {
Database db = getDb(tabletMeta.getDbId());
groupId = colocateTableIndex.getGroup(tabletMeta.getTableId());
db.writeLock();
try {
backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
} finally {
db.writeUnlock();
}
}
// Empty/missing sequence means backends must be chosen freshly below.
boolean chooseBackendsArbitrary = backendsPerBucketSeq == null || backendsPerBucketSeq.isEmpty();
if (chooseBackendsArbitrary) {
backendsPerBucketSeq = Lists.newArrayList();
}
for (int i = 0; i < distributionInfo.getBucketNum(); ++i) {
LocalTablet tablet = new LocalTablet(getNextId());
index.addTablet(tablet, tabletMeta);
tabletIdSet.add(tablet.getId());
List<Long> chosenBackendIds;
if (chooseBackendsArbitrary) {
// Strict medium check additionally requires backends that host the
// partition's storage medium.
if (Config.enable_strict_storage_medium_check) {
chosenBackendIds =
chosenBackendIdBySeq(replicationNum, tabletMeta.getStorageMedium());
} else {
chosenBackendIds = chosenBackendIdBySeq(replicationNum);
}
backendsPerBucketSeq.add(chosenBackendIds);
} else {
// Colocate: reuse the group's backends for this bucket index.
chosenBackendIds = backendsPerBucketSeq.get(i);
}
// One replica per chosen backend.
for (long backendId : chosenBackendIds) {
long replicaId = getNextId();
Replica replica = new Replica(replicaId, backendId, replicaState, version,
tabletMeta.getOldSchemaHash());
tablet.addReplica(replica);
}
Preconditions.checkState(chosenBackendIds.size() == replicationNum,
chosenBackendIds.size() + " vs. " + replicationNum);
}
// First table of a colocate group: record and persist the fresh bucket
// sequence so later group members (and replay) use the same placement.
if (groupId != null && chooseBackendsArbitrary) {
colocateTableIndex.addBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
ColocatePersistInfo info =
ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
editLog.logColocateBackendsPerBucketSeq(info);
}
}
/**
 * Sequentially chooses {@code replicationNum} backends that can host the given
 * storage medium.
 *
 * @throws DdlException when not enough suitable backends exist
 */
private List<Long> chosenBackendIdBySeq(int replicationNum, TStorageMedium storageMedium)
        throws DdlException {
    List<Long> backendIds = systemInfoService.seqChooseBackendIdsByStorageMedium(replicationNum,
            true, true, storageMedium);
    if (!CollectionUtils.isEmpty(backendIds)) {
        return backendIds;
    }
    // Tell the user how to relax the medium check, and why that may be unwise.
    throw new DdlException(
            "Failed to find enough hosts with storage medium " + storageMedium +
                    " at all backends, number of replicas needed: " +
                    replicationNum + ". Storage medium check failure can be forcefully ignored by executing " +
                    "'ADMIN SET FRONTEND CONFIG (\"enable_strict_storage_medium_check\" = \"false\");', " +
                    "but incompatible medium type can cause balance problem, so we strongly recommend" +
                    " creating table with compatible 'storage_medium' property set.");
}
/**
 * Sequentially chooses {@code replicationNum} alive backends with no
 * storage-medium constraint.
 *
 * @throws DdlException when not enough alive backends exist; the message lists
 *                      the currently alive backend ids
 */
private List<Long> chosenBackendIdBySeq(int replicationNum) throws DdlException {
    List<Long> backendIds = systemInfoService.seqChooseBackendIds(replicationNum, true, true);
    if (CollectionUtils.isEmpty(backendIds)) {
        List<Long> aliveBackendIds = systemInfoService.getBackendIds(true);
        throw new DdlException("Failed to find enough host in all backends. need: " + replicationNum +
                ", Current alive backend is [" + Joiner.on(",").join(aliveBackendIds) + "]");
    }
    return backendIds;
}
/**
 * Drops a table as requested by a DROP TABLE statement.
 *
 * @throws DdlException if the target database does not exist
 */
public void dropTable(DropTableStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    Database db = getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    // The database handles IF EXISTS and FORCE semantics itself.
    db.dropTable(stmt.getTableName(), stmt.isSetIfExists(), stmt.isForceDrop());
}
/**
 * Submits drop-tablet batch tasks to the agent task executor, throttling large
 * batches so that no single submission exceeds
 * {@code Config.max_agent_tasks_send_per_be} tasks, with a pause between
 * submissions to avoid overwhelming a backend.
 *
 * @param batchTaskMap backend id -> batch of drop tasks destined for that backend
 */
public void sendDropTabletTasks(HashMap<Long, AgentBatchTask> batchTaskMap) {
    int numDropTaskPerBe = Config.max_agent_tasks_send_per_be;
    for (Map.Entry<Long, AgentBatchTask> entry : batchTaskMap.entrySet()) {
        AgentBatchTask originTasks = entry.getValue();
        if (originTasks.getTaskNum() <= numDropTaskPerBe) {
            // Small enough to send in one shot.
            AgentTaskExecutor.submit(originTasks);
            continue;
        }
        AgentBatchTask partTask = new AgentBatchTask();
        int curTask = 0;
        for (AgentTask task : originTasks.getAllTasks()) {
            partTask.addTask(task);
            // Fix: submit once the batch reaches the limit. The previous
            // post-increment check (curTask++ > numDropTaskPerBe with curTask
            // starting at 1) let each batch grow to numDropTaskPerBe + 1 tasks,
            // exceeding the configured maximum by one.
            if (++curTask >= numDropTaskPerBe) {
                AgentTaskExecutor.submit(partTask);
                curTask = 0;
                partTask = new AgentBatchTask();
                // Back off between submissions to avoid overwhelming the backend.
                ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
            }
        }
        // Flush the trailing partial batch, if any.
        if (partTask.getAllTasks().size() > 0) {
            AgentTaskExecutor.submit(partTask);
        }
    }
}
/**
 * Replays a table drop. Any deferred cleanup returned by the drop is executed
 * after the database write lock has been released.
 */
public void replayDropTable(Database db, long tableId, boolean isForceDrop) {
    Runnable deferredCleanup;
    db.writeLock();
    try {
        deferredCleanup = db.unprotectDropTable(tableId, isForceDrop, true);
    } finally {
        db.writeUnlock();
    }
    // Run cleanup outside the lock.
    if (deferredCleanup != null) {
        deferredCleanup.run();
    }
}
// Replays permanently erasing one table from the recycle bin.
public void replayEraseTable(long tableId) {
recycleBin.replayEraseTable(tableId);
}
/**
 * Replays a batched table erase: permanently removes each listed table from the
 * recycle bin.
 */
public void replayEraseMultiTables(MultiEraseTableInfo multiEraseTableInfo) {
    for (Long tableId : multiEraseTableInfo.getTableIds()) {
        recycleBin.replayEraseTable(tableId);
    }
}
// Replays recovering a table from the recycle bin, under the db write lock.
public void replayRecoverTable(RecoverInfo info) {
long dbId = info.getDbId();
Database db = getDb(dbId);
db.writeLock();
try {
recycleBin.replayRecoverTable(db, info.getTableId());
} finally {
db.writeUnlock();
}
}
/**
 * Replays adding one replica to its tablet. "Unprotect" means the caller must
 * already hold the database write lock (see {@code replayAddReplica}). All
 * lookups include the recycle bin so replicas of dropped-but-recoverable tables
 * still replay.
 */
private void unprotectAddReplica(ReplicaPersistInfo info) {
LOG.debug("replay add a replica {}", info);
Database db = getDbIncludeRecycleBin(info.getDbId());
OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId());
Partition partition = getPartitionIncludeRecycleBin(olapTable, info.getPartitionId());
MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId());
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId());
// A logged schema hash of -1 means "use the index's current schema hash".
int schemaHash = info.getSchemaHash();
if (schemaHash == -1) {
schemaHash = olapTable.getSchemaHashByIndexId(info.getIndexId());
}
Replica replica = new Replica(info.getReplicaId(), info.getBackendId(), info.getVersion(),
schemaHash, info.getDataSize(), info.getRowCount(),
Replica.ReplicaState.NORMAL,
info.getLastFailedVersion(),
info.getLastSuccessVersion());
tablet.addReplica(replica);
}
/**
 * Replays updating one replica's version, data size and row count, and clears
 * its bad flag. "Unprotect" means the caller must already hold the database
 * write lock (see {@code replayUpdateReplica}).
 */
private void unprotectUpdateReplica(ReplicaPersistInfo info) {
LOG.debug("replay update a replica {}", info);
Database db = getDbIncludeRecycleBin(info.getDbId());
OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId());
Partition partition = getPartitionIncludeRecycleBin(olapTable, info.getPartitionId());
MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId());
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId());
Replica replica = tablet.getReplicaByBackendId(info.getBackendId());
Preconditions.checkNotNull(replica, info);
replica.updateRowCount(info.getVersion(), info.getDataSize(), info.getRowCount());
// A successfully updated replica is no longer considered bad.
replica.setBad(false);
}
// Locked wrapper around unprotectAddReplica; uses the recycle-bin-aware db
// lookup so replicas of dropped-but-recoverable tables still replay.
public void replayAddReplica(ReplicaPersistInfo info) {
Database db = getDbIncludeRecycleBin(info.getDbId());
db.writeLock();
try {
unprotectAddReplica(info);
} finally {
db.writeUnlock();
}
}
// Locked wrapper around unprotectUpdateReplica; uses the recycle-bin-aware db
// lookup so replicas of dropped-but-recoverable tables still replay.
public void replayUpdateReplica(ReplicaPersistInfo info) {
Database db = getDbIncludeRecycleBin(info.getDbId());
db.writeLock();
try {
unprotectUpdateReplica(info);
} finally {
db.writeUnlock();
}
}
/**
 * Replays removing the replica on the logged backend from its tablet.
 * "Unprotect" means the caller must already hold the database write lock
 * (see {@code replayDeleteReplica}).
 */
public void unprotectDeleteReplica(ReplicaPersistInfo info) {
Database db = getDbIncludeRecycleBin(info.getDbId());
OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId());
Partition partition = getPartitionIncludeRecycleBin(olapTable, info.getPartitionId());
MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId());
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId());
tablet.deleteReplicaByBackendId(info.getBackendId());
}
// Locked wrapper around unprotectDeleteReplica; uses the recycle-bin-aware db
// lookup so replicas of dropped-but-recoverable tables still replay.
public void replayDeleteReplica(ReplicaPersistInfo info) {
Database db = getDbIncludeRecycleBin(info.getDbId());
db.writeLock();
try {
unprotectDeleteReplica(info);
} finally {
db.writeUnlock();
}
}
@Override
public Table getTable(String dbName, String tblName) {
    // Null-safe lookup: a missing database yields null rather than an exception.
    Database database = getDb(dbName);
    return database == null ? null : database.getTable(tblName);
}
@Override
public Database getDb(String name) {
    // Look up a database by full name. The information-schema database is the
    // only one matched case-insensitively.
    if (name == null) {
        return null;
    }
    if (fullNameToDb.containsKey(name)) {
        return fullNameToDb.get(name);
    }
    String shortName = ClusterNamespace.getNameFromFullName(name);
    if (shortName.equalsIgnoreCase(InfoSchemaDb.DATABASE_NAME)) {
        return fullNameToDb.get(shortName.toLowerCase());
    }
    return null;
}
// Looks up a live database by id; returns null when unknown. Dropped databases
// live in the recycle bin — see getDbIncludeRecycleBin.
@Override
public Database getDb(long dbId) {
return idToDb.get(dbId);
}
// Returns the live full-name -> Database map.
// NOTE(review): this exposes the internal mutable map; callers can modify
// metastore state directly — consider returning an unmodifiable view.
public ConcurrentHashMap<String, Database> getFullNameToDb() {
return fullNameToDb;
}
/**
 * Looks up a database by id, falling back to the recycle bin for databases that
 * have been dropped but are still recoverable.
 */
public Database getDbIncludeRecycleBin(long dbId) {
    Database db = idToDb.get(dbId);
    return db != null ? db : recycleBin.getDatabase(dbId);
}
/**
 * Looks up a table in the database, falling back to the recycle bin for tables
 * that have been dropped but are still recoverable.
 */
public Table getTableIncludeRecycleBin(Database db, long tableId) {
    Table table = db.getTable(tableId);
    return table != null ? table : recycleBin.getTable(db.getId(), tableId);
}
// Returns the database's tables plus those awaiting erase in the recycle bin.
// NOTE(review): assumes db.getTables() returns a fresh mutable list — the addAll
// below would otherwise mutate the database's internal state; confirm against
// Database.getTables().
public List<Table> getTablesIncludeRecycleBin(Database db) {
List<Table> tables = db.getTables();
tables.addAll(recycleBin.getTables(db.getId()));
return tables;
}
/**
 * Looks up a partition on the table, falling back to the recycle bin for
 * partitions that have been dropped but are still recoverable.
 */
public Partition getPartitionIncludeRecycleBin(OlapTable table, long partitionId) {
    Partition partition = table.getPartition(partitionId);
    return partition != null ? partition : recycleBin.getPartition(partitionId);
}
/**
 * Returns the table's partitions merged with its partitions pending erase in
 * the recycle bin. The returned collection is a fresh copy.
 */
public Collection<Partition> getPartitionsIncludeRecycleBin(OlapTable table) {
    List<Partition> merged = new ArrayList<>(table.getPartitions());
    merged.addAll(recycleBin.getPartitions(table.getId()));
    return merged;
}
// Returns ALL of the table's partitions (including temporary/shadow ones, if
// getAllPartitions covers them — confirm) plus those in the recycle bin.
// NOTE(review): assumes table.getAllPartitions() returns a fresh mutable
// collection — the addAll below would otherwise mutate the table's internal
// state; confirm against OlapTable.getAllPartitions().
public Collection<Partition> getAllPartitionsIncludeRecycleBin(OlapTable table) {
Collection<Partition> partitions = table.getAllPartitions();
partitions.addAll(recycleBin.getPartitions(table.getId()));
return partitions;
}
/**
 * Returns the partition's data property, falling back to the recycle bin's
 * record for dropped partitions.
 */
public DataProperty getDataPropertyIncludeRecycleBin(PartitionInfo info, long partitionId) {
    DataProperty property = info.getDataProperty(partitionId);
    return property != null ? property : recycleBin.getPartitionDataProperty(partitionId);
}
/**
 * Returns the partition's replication number, falling back to the recycle bin's
 * record for dropped partitions. A value of -1 from the partition info marks
 * "not found" and triggers the fallback.
 */
public short getReplicationNumIncludeRecycleBin(PartitionInfo info, long partitionId) {
    short replicaNum = info.getReplicationNum(partitionId);
    return replicaNum != (short) -1 ? replicaNum : recycleBin.getPartitionReplicationNum(partitionId);
}
@Override
public List<String> listDbNames() {
    // Snapshot of all registered full database names.
    return new ArrayList<>(fullNameToDb.keySet());
}
@Override
public List<String> listTableNames(String dbName) throws DdlException {
    // Snapshot of the table names in the given database.
    Database database = getDb(dbName);
    if (database == null) {
        throw new DdlException("Database " + dbName + " doesn't exist");
    }
    List<String> tableNames = new ArrayList<>();
    for (Table table : database.getTables()) {
        tableNames.add(table.getName());
    }
    return tableNames;
}
@Override
public List<Long> getDbIds() {
    // Snapshot of all live database ids.
    return new ArrayList<>(idToDb.keySet());
}
/**
 * Returns all live database ids plus the ids of databases sitting in the
 * recycle bin.
 */
public List<Long> getDbIdsIncludeRecycleBin() {
    List<Long> allIds = getDbIds();
    allIds.addAll(recycleBin.getAllDbIds());
    return allIds;
}
/**
 * Builds the partitionId -&gt; storage-medium map reported to backends, and at the same
 * time migrates OLAP partitions whose SSD cooldown deadline has passed over to HDD,
 * persisting each migration via a {@code ModifyPartitionInfo} edit-log record.
 *
 * Two passes keep lock usage sane: pass 1 scans every database under its read lock and
 * only collects the cooled-down partitions; pass 2 re-acquires each affected database's
 * write lock (with a timeout, skipping the db on failure) and applies the SSD-&gt;HDD
 * change. Fixed a log-message typo ("hecking" -&gt; "checking") in the lock-timeout path.
 *
 * @return map from partition id to its current (possibly just-updated) storage medium;
 *         partitions migrated in pass 2 are reported as HDD
 */
public HashMap<Long, TStorageMedium> getPartitionIdToStorageMediumMap() {
    HashMap<Long, TStorageMedium> storageMediumMap = new HashMap<>();
    // dbId -> (tableId -> partitionIds) of SSD partitions that must be moved to HDD.
    HashMap<Long, Multimap<Long, Long>> changedPartitionsMap = new HashMap<>();
    long currentTimeMs = System.currentTimeMillis();
    List<Long> dbIds = getDbIds();
    // Pass 1: read-locked scan, collect cooled-down partitions without mutating anything.
    for (long dbId : dbIds) {
        Database db = getDb(dbId);
        if (db == null) {
            LOG.warn("db {} does not exist while doing backend report", dbId);
            continue;
        }
        db.readLock();
        try {
            for (Table table : db.getTables()) {
                if (table.getType() != Table.TableType.OLAP) {
                    continue;
                }
                long tableId = table.getId();
                OlapTable olapTable = (OlapTable) table;
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                for (Partition partition : olapTable.getAllPartitions()) {
                    long partitionId = partition.getId();
                    DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
                    Preconditions.checkNotNull(dataProperty,
                            partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId);
                    // Only NORMAL-state, non-primary-key tables are eligible for migration.
                    if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                            && dataProperty.getCooldownTimeMs() < currentTimeMs
                            && olapTable.getState() == OlapTable.OlapTableState.NORMAL
                            && olapTable.getKeysType() != KeysType.PRIMARY_KEYS) {
                        changedPartitionsMap.computeIfAbsent(dbId, k -> HashMultimap.create())
                                .put(tableId, partitionId);
                    } else {
                        storageMediumMap.put(partitionId, dataProperty.getStorageMedium());
                    }
                }
            }
        } finally {
            db.readUnlock();
        }
    }
    // Pass 2: apply SSD -> HDD for the collected partitions under each db's write lock.
    for (Long dbId : changedPartitionsMap.keySet()) {
        Database db = getDb(dbId);
        if (db == null) {
            LOG.warn("db {} does not exist while checking backend storage medium", dbId);
            continue;
        }
        Multimap<Long, Long> tableIdToPartitionIds = changedPartitionsMap.get(dbId);
        if (!db.tryWriteLock(Database.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
            LOG.warn("try get db {} writelock but failed when checking backend storage medium", dbId);
            continue;
        }
        Preconditions.checkState(db.isWriteLockHeldByCurrentThread());
        try {
            for (Long tableId : tableIdToPartitionIds.keySet()) {
                Table table = db.getTable(tableId);
                if (table == null) {
                    continue; // table dropped between the two passes
                }
                OlapTable olapTable = (OlapTable) table;
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                Collection<Long> partitionIds = tableIdToPartitionIds.get(tableId);
                for (Long partitionId : partitionIds) {
                    Partition partition = olapTable.getPartition(partitionId);
                    if (partition == null) {
                        continue; // partition dropped between the two passes
                    }
                    DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
                    // Re-check under the write lock: the property may have changed since pass 1.
                    if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                            && dataProperty.getCooldownTimeMs() < currentTimeMs) {
                        DataProperty hdd = new DataProperty(TStorageMedium.HDD);
                        partitionInfo.setDataProperty(partition.getId(), hdd);
                        storageMediumMap.put(partitionId, TStorageMedium.HDD);
                        LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD",
                                dbId, tableId, partitionId);
                        // Persist the change; replication num -1 means "leave unchanged" on replay.
                        ModifyPartitionInfo info =
                                new ModifyPartitionInfo(db.getId(), olapTable.getId(),
                                        partition.getId(),
                                        hdd,
                                        (short) -1,
                                        partitionInfo.getIsInMemory(partition.getId()));
                        editLog.logModifyPartition(info);
                    }
                }
            }
        } finally {
            db.writeUnlock();
        }
    }
    return storageMediumMap;
}
/**
 * Handles AlterTableStmt (the ALTER TABLE command) by delegating to the alter
 * instance, which dispatches to SchemaChangeHandler or RollupHandler as appropriate.
 */
@Override
public void alterTable(AlterTableStmt stmt) throws UserException {
    stateMgr.getAlterInstance().processAlterTable(stmt);
}
/**
 * Handles AlterViewStmt (the ALTER VIEW command) by delegating to the alter instance,
 * passing along the current connection context.
 */
@Override
public void alterView(AlterViewStmt stmt) throws UserException {
    stateMgr.getAlterInstance().processAlterView(stmt, ConnectContext.get());
}
/**
 * Creates a single-table (rollup-style) materialized view from the legacy
 * CreateMaterializedViewStmt syntax; delegated entirely to the alter instance.
 */
@Override
public void createMaterializedView(CreateMaterializedViewStmt stmt)
        throws AnalysisException, DdlException {
    stateMgr.getAlterInstance().processCreateMaterializedView(stmt);
}
/**
 * Creates a multi-table materialized view from a CreateMaterializedViewStatement.
 *
 * High-level flow: validate name uniqueness, build partition/distribution/refresh
 * metadata, construct the MaterializedView with its base index, analyze properties,
 * pre-create the partition for unpartitioned MVs, register the MV in the database
 * under the global state lock, and finally create the background refresh task.
 *
 * @throws DdlException if the db is missing, the name collides (without IF NOT EXISTS),
 *                      properties are invalid, or the global lock cannot be acquired
 */
@Override
public void createMaterializedView(CreateMaterializedViewStatement stmt)
        throws DdlException {
    String mvName = stmt.getTableName().getTbl();
    String dbName = stmt.getTableName().getDb();
    LOG.debug("Begin create materialized view: {}", mvName);
    Database db = this.getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    // Early existence check under the read lock. NOTE(review): this is only advisory --
    // the authoritative check happens later in createMaterializedWithLock.
    db.readLock();
    try {
        if (db.getTable(mvName) != null) {
            if (stmt.isIfNotExists()) {
                LOG.info("Create materialized view [{}] which already exists", mvName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, mvName);
            }
        }
    } finally {
        db.readUnlock();
    }
    // Schema: the MV's own column list, validated before any metadata is built.
    List<Column> baseSchema = stmt.getMvColumnItems();
    validateColumns(baseSchema);
    // Partition info: expression-partitioned when a partition desc is given,
    // otherwise a single unpartitioned partition.
    PartitionDesc partitionDesc = stmt.getPartitionExpDesc();
    PartitionInfo partitionInfo;
    if (partitionDesc != null) {
        partitionInfo = partitionDesc.toPartitionInfo(
                Arrays.asList(stmt.getPartitionColumn()),
                Maps.newHashMap(), false);
    } else {
        partitionInfo = new SinglePartitionInfo();
    }
    DistributionDesc distributionDesc = stmt.getDistributionDesc();
    Preconditions.checkNotNull(distributionDesc);
    DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
    // Refresh scheme: ASYNC carries start time and optional interval; SYNC and MANUAL
    // only set the type.
    MaterializedView.MvRefreshScheme mvRefreshScheme;
    RefreshSchemeDesc refreshSchemeDesc = stmt.getRefreshSchemeDesc();
    if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.ASYNC) {
        mvRefreshScheme = new MaterializedView.MvRefreshScheme();
        AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc;
        MaterializedView.AsyncRefreshContext asyncRefreshContext = mvRefreshScheme.getAsyncRefreshContext();
        asyncRefreshContext.setStartTime(Utils.getLongFromDateTime(asyncRefreshSchemeDesc.getStartTime()));
        asyncRefreshContext.setDefineStartTime(asyncRefreshSchemeDesc.isDefineStartTime());
        if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) {
            asyncRefreshContext.setStep(
                    ((IntLiteral) asyncRefreshSchemeDesc.getIntervalLiteral().getValue()).getValue());
            asyncRefreshContext.setTimeUnit(
                    asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription());
        }
    } else if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.SYNC) {
        mvRefreshScheme = new MaterializedView.MvRefreshScheme();
        mvRefreshScheme.setType(MaterializedView.RefreshType.SYNC);
    } else {
        mvRefreshScheme = new MaterializedView.MvRefreshScheme();
        mvRefreshScheme.setType(MaterializedView.RefreshType.MANUAL);
    }
    // Build the MV object and its base (and only) index metadata.
    long mvId = GlobalStateMgr.getCurrentState().getNextId();
    MaterializedView materializedView =
            new MaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo,
                    distributionInfo, mvRefreshScheme);
    materializedView.setComment(stmt.getComment());
    materializedView.setBaseTableIds(stmt.getBaseTableIds());
    materializedView.setViewDefineSql(stmt.getInlineViewDef());
    materializedView.setPartitionRefTableExprs(Lists.newArrayList(stmt.getPartitionRefTableExpr()));
    long baseIndexId = getNextId();
    materializedView.setBaseIndexId(baseIndexId);
    int schemaVersion = 0;
    int schemaHash = Util.schemaHash(schemaVersion, baseSchema, null, 0d);
    short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, null);
    TStorageType baseIndexStorageType = TStorageType.COLUMN;
    materializedView.setIndexMeta(baseIndexId, mvName, baseSchema, schemaVersion, schemaHash,
            shortKeyColumnCount, baseIndexStorageType, stmt.getKeysType());
    // Replication num: only recorded on the MV when explicitly set by the user.
    Map<String, String> properties = stmt.getProperties();
    short replicationNum = FeConstants.default_replication_num;
    try {
        boolean isReplicationNumSet =
                properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM);
        replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum);
        if (isReplicationNumSet) {
            materializedView.setReplicationNum(replicationNum);
        }
    } catch (AnalysisException e) {
        throw new DdlException(e.getMessage(), e);
    }
    // Query hints from the defining SELECT are validated here and later forwarded to
    // the refresh task's properties.
    Map<String, String> optHints = null;
    QueryRelation queryRelation = stmt.getQueryStatement().getQueryRelation();
    if (queryRelation instanceof SelectRelation) {
        SelectRelation selectRelation = (SelectRelation) queryRelation;
        optHints = selectRelation.getSelectList().getOptHints();
        if (optHints != null && !optHints.isEmpty()) {
            SessionVariable sessionVariable = VariableMgr.newSessionVariable();
            for (String key : optHints.keySet()) {
                VariableMgr.setVar(sessionVariable, new SetVar(key, new StringLiteral(optHints.get(key))), true);
            }
        }
    }
    // Storage medium / cooldown properties; any property left unconsumed is an error.
    DataProperty dataProperty;
    try {
        boolean hasMedium = false;
        if (properties != null) {
            hasMedium = properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM);
        }
        dataProperty = PropertyAnalyzer.analyzeDataProperty(properties,
                DataProperty.DEFAULT_DATA_PROPERTY);
        if (hasMedium && dataProperty.getStorageMedium() == TStorageMedium.SSD) {
            materializedView.setStorageMedium(dataProperty.getStorageMedium());
            materializedView.getTableProperty().getProperties()
                    .put(PropertyAnalyzer.PROPERTIES_STORAGE_COLDOWN_TIME,
                            String.valueOf(dataProperty.getCooldownTimeMs()));
        }
        if (properties != null && !properties.isEmpty()) {
            throw new DdlException("Unknown properties: " + properties);
        }
    } catch (AnalysisException e) {
        throw new DdlException(e.getMessage(), e);
    }
    // Unpartitioned MVs get their single partition (and tablets) created up front;
    // partitioned MVs create partitions during refresh.
    boolean createMvSuccess;
    Set<Long> tabletIdSet = new HashSet<>();
    if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
        long partitionId = GlobalStateMgr.getCurrentState().getNextId();
        Preconditions.checkNotNull(dataProperty);
        partitionInfo.setDataProperty(partitionId, dataProperty);
        partitionInfo.setReplicationNum(partitionId, replicationNum);
        partitionInfo.setIsInMemory(partitionId, false);
        partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK);
        Long version = Partition.PARTITION_INIT_VERSION;
        Partition partition = createPartition(db, materializedView, partitionId, mvName, version, tabletIdSet);
        buildPartitions(db, materializedView, Collections.singletonList(partition));
        materializedView.addPartition(partition);
    }
    // Register the MV under the global state lock; on failure, clean up any tablets
    // already registered in the inverted index.
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (getDb(db.getId()) == null) {
            throw new DdlException("Database has been dropped when creating materialized view");
        }
        createMvSuccess = db.createMaterializedWithLock(materializedView, false);
        if (!createMvSuccess) {
            for (Long tabletId : tabletIdSet) {
                GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
            }
            if (!stmt.isIfNotExists()) {
                ErrorReport
                        .reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, materializedView,
                                "Materialized view already exists");
            } else {
                LOG.info("Create materialized view[{}] which already exists", materializedView);
                return;
            }
        }
    } finally {
        unlock();
    }
    LOG.info("Successfully create materialized view[{};{}]", mvName, mvId);
    if (createMvSuccess) {
        createTaskForMaterializedView(dbName, materializedView, optHints);
    }
}
/**
 * Creates the background refresh task for a non-SYNC materialized view.
 *
 * MANUAL MVs get a MANUAL task; ASYNC MVs get an EVENT_TRIGGERED task when no interval
 * was specified, otherwise a PERIODICAL task with the configured schedule. Query hints
 * captured at creation time are copied into the task's properties. EVENT_TRIGGERED
 * tasks are executed once immediately after creation.
 *
 * @param optHints select-list hints to forward to the task; may be null
 */
private void createTaskForMaterializedView(String dbName, MaterializedView materializedView,
                                           Map<String, String> optHints) throws DdlException {
    MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType();
    if (refreshType != MaterializedView.RefreshType.SYNC) {
        Task task = TaskBuilder.buildMvTask(materializedView, dbName);
        MaterializedView.AsyncRefreshContext asyncRefreshContext =
                materializedView.getRefreshScheme().getAsyncRefreshContext();
        if (refreshType == MaterializedView.RefreshType.MANUAL) {
            task.setType(Constants.TaskType.MANUAL);
        } else if (refreshType == MaterializedView.RefreshType.ASYNC) {
            // No time unit means no interval was given: refresh on base-table events only.
            if (asyncRefreshContext.getTimeUnit() == null) {
                task.setType(Constants.TaskType.EVENT_TRIGGERED);
            } else {
                long startTime = asyncRefreshContext.getStartTime();
                TaskSchedule taskSchedule = new TaskSchedule(startTime,
                        asyncRefreshContext.getStep(),
                        TimeUtils.convertUnitIdentifierToTimeUnit(asyncRefreshContext.getTimeUnit()));
                task.setSchedule(taskSchedule);
                task.setType(Constants.TaskType.PERIODICAL);
            }
        }
        if (optHints != null) {
            Map<String, String> taskProperties = task.getProperties();
            taskProperties.putAll(optHints);
        }
        TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
        taskManager.createTask(task, false);
        // Kick off the first refresh immediately for event-triggered MVs.
        if (task.getType() == Constants.TaskType.EVENT_TRIGGERED) {
            taskManager.executeTask(task.getName());
        }
    }
}
/**
 * Drops a materialized view.
 *
 * New-style (multi-table) MVs are dropped directly from the database, their back-links
 * on base tables removed, and their refresh task deleted. Anything else (including the
 * stmt carrying a db.tbl name) is routed to the alter instance, which handles legacy
 * rollup-style MVs.
 */
@Override
public void dropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException {
    if (stmt.getDbTblName() != null) {
        stateMgr.getAlterInstance().processDropMaterializedView(stmt);
    }
    Database db = getDb(stmt.getDbName());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName());
    }
    Table table;
    db.readLock();
    try {
        table = db.getTable(stmt.getMvName());
    } finally {
        db.readUnlock();
    }
    if (table instanceof MaterializedView) {
        // NOTE(review): dropTable is called outside the read lock taken above; presumably
        // it takes its own lock internally -- confirm before restructuring.
        db.dropTable(table.getName(), stmt.isSetIfExists(), true);
        // Remove the MV back-reference from each base table so planners stop seeing it.
        Set<Long> baseTableIds = ((MaterializedView) table).getBaseTableIds();
        if (baseTableIds != null) {
            for (Long baseTableId : baseTableIds) {
                OlapTable baseTable = ((OlapTable) db.getTable(baseTableId));
                if (baseTable != null) {
                    baseTable.removeRelatedMaterializedView(table.getId());
                }
            }
        }
        // Drop the MV's refresh task, if one was ever created.
        TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
        Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(table.getId()));
        if (refreshTask != null) {
            taskManager.dropTasks(Lists.newArrayList(refreshTask.getId()), false);
        }
    } else {
        stateMgr.getAlterInstance().processDropMaterializedView(stmt);
    }
}
/** Handles ALTER MATERIALIZED VIEW by delegating to the alter instance. */
@Override
public void alterMaterializedView(AlterMaterializedViewStatement stmt) throws DdlException, MetaNotFoundException {
    stateMgr.getAlterInstance().processAlterMaterializedView(stmt);
}
/**
 * Triggers a refresh of the named materialized view, lazily creating its refresh task
 * if none is registered yet.
 *
 * @param priority execution priority forwarded to the task manager
 * @throws MetaNotFoundException if the name does not resolve to a materialized view
 */
@Override
public void refreshMaterializedView(String dbName, String mvName, int priority) throws DdlException, MetaNotFoundException {
    Database db = this.getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    MaterializedView materializedView = null;
    db.readLock();
    try {
        final Table table = db.getTable(mvName);
        if (table instanceof MaterializedView) {
            materializedView = (MaterializedView) table;
        }
    } finally {
        db.readUnlock();
    }
    if (materializedView == null) {
        throw new MetaNotFoundException(mvName + " is not a materialized view");
    }
    // The task may be missing (e.g. created before task support); create it on demand.
    TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
    final String mvTaskName = TaskBuilder.getMvTaskName(materializedView.getId());
    if (!taskManager.containTask(mvTaskName)) {
        Task task = TaskBuilder.buildMvTask(materializedView, dbName);
        taskManager.createTask(task, false);
    }
    taskManager.executeTask(mvTaskName, new ExecuteOption(priority));
}
/**
 * Cancels an in-flight refresh of the named materialized view by killing its refresh
 * task, if one exists. A no-op when no task is registered.
 *
 * @throws MetaNotFoundException if the name does not resolve to a materialized view
 */
@Override
public void cancelRefreshMaterializedView(String dbName, String mvName) throws DdlException, MetaNotFoundException {
    Database db = this.getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    MaterializedView materializedView = null;
    db.readLock();
    try {
        final Table table = db.getTable(mvName);
        if (table instanceof MaterializedView) {
            materializedView = (MaterializedView) table;
        }
    } finally {
        db.readUnlock();
    }
    if (materializedView == null) {
        throw new MetaNotFoundException(mvName + " is not a materialized view");
    }
    TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
    Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(materializedView.getId()));
    if (refreshTask != null) {
        taskManager.killTask(refreshTask.getName(), false);
    }
}
/*
 * Used for handling CancelAlterTableStmt (for the client this is the CANCEL ALTER
 * command), including SchemaChangeHandler and RollupHandler.
 */
/**
 * Handles CancelAlterTableStmt (CANCEL ALTER) by dispatching on the alter type to the
 * rollup or schema-change handler.
 *
 * @throws DdlException for alter types with no cancel support
 */
public void cancelAlter(CancelAlterTableStmt stmt) throws DdlException {
    switch (stmt.getAlterType()) {
        case ROLLUP:
            stateMgr.getRollupHandler().cancel(stmt);
            break;
        case COLUMN:
            stateMgr.getSchemaChangeHandler().cancel(stmt);
            break;
        case MATERIALIZED_VIEW:
            stateMgr.getRollupHandler().cancelMV(stmt);
            break;
        default:
            throw new DdlException("Cancel " + stmt.getAlterType() + " does not implement yet");
    }
}
/**
 * Renames an OLAP table in place: validates state and name uniqueness, re-keys the
 * table in the database's name map, deactivates dependent materialized views, and
 * writes a rename edit-log record.
 *
 * NOTE(review): callers presumably hold the db write lock (replay does) -- confirm.
 *
 * @throws DdlException if the table is mid-alter, the name is unchanged, or taken
 */
@Override
public void renameTable(Database db, Table table, TableRenameClause tableRenameClause) throws DdlException {
    OlapTable olapTable = (OlapTable) table;
    if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState());
    }
    String oldTableName = olapTable.getName();
    String newTableName = tableRenameClause.getNewTableName();
    if (oldTableName.equals(newTableName)) {
        throw new DdlException("Same table name");
    }
    if (db.getTable(newTableName) != null) {
        throw new DdlException("Table name[" + newTableName + "] is already used");
    }
    // checkAndSetName both validates and applies the new name on the table object.
    olapTable.checkAndSetName(newTableName, false);
    // Re-key the table under its new name in the db's name->table map.
    db.dropTable(oldTableName);
    db.createTable(olapTable);
    // MVs defined against the old name are no longer valid; deactivate them.
    disableMaterializedView(db, olapTable);
    TableInfo tableInfo = TableInfo.createForTableRename(db.getId(), olapTable.getId(), newTableName);
    editLog.logTableRename(tableInfo);
    LOG.info("rename table[{}] to {}, tableId: {}", oldTableName, newTableName, olapTable.getId());
}
/**
 * Marks every materialized view that depends on {@code olapTable} as inactive; views
 * that no longer exist are logged and skipped.
 */
private void disableMaterializedView(Database db, OlapTable olapTable) {
    for (long mvId : olapTable.getRelatedMaterializedViews()) {
        MaterializedView mv = (MaterializedView) db.getTable(mvId);
        if (mv == null) {
            LOG.warn("Ignore materialized view {} does not exists", mvId);
            continue;
        }
        mv.setActive(false);
    }
}
/**
 * Edit-log replay counterpart of {@link #renameTable}: re-keys the table under its new
 * name and deactivates dependent MVs, under the db write lock. Writes no new log entry.
 */
public void replayRenameTable(TableInfo tableInfo) {
    long dbId = tableInfo.getDbId();
    long tableId = tableInfo.getTableId();
    String newTableName = tableInfo.getNewTableName();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableId);
        String tableName = table.getName();
        db.dropTable(tableName);
        table.setName(newTableName);
        db.createTable(table);
        disableMaterializedView(db, table);
        LOG.info("replay rename table[{}] to {}, tableId: {}", tableName, newTableName, table.getId());
    } finally {
        db.writeUnlock();
    }
}
/**
 * Renames a partition of a range-partitioned OLAP table, deactivates dependent MVs,
 * and writes a partition-rename edit-log record.
 *
 * @throws DdlException if the table is mid-alter, not range-partitioned, the partition
 *                      is missing, the name is unchanged, or the new name is taken
 */
@Override
public void renamePartition(Database db, Table table, PartitionRenameClause renameClause) throws DdlException {
    OlapTable olapTable = (OlapTable) table;
    if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState());
    }
    // Single-partition tables have an implicit partition that cannot be renamed.
    if (olapTable.getPartitionInfo().getType() != PartitionType.RANGE) {
        throw new DdlException("Table[" + olapTable.getName() + "] is single partitioned. "
                + "no need to rename partition name.");
    }
    String partitionName = renameClause.getPartitionName();
    String newPartitionName = renameClause.getNewPartitionName();
    if (partitionName.equalsIgnoreCase(newPartitionName)) {
        throw new DdlException("Same partition name");
    }
    Partition partition = olapTable.getPartition(partitionName);
    if (partition == null) {
        throw new DdlException("Partition[" + partitionName + "] does not exists");
    }
    if (olapTable.checkPartitionNameExist(newPartitionName)) {
        throw new DdlException("Partition name[" + newPartitionName + "] is already used");
    }
    olapTable.renamePartition(partitionName, newPartitionName);
    disableMaterializedView(db, olapTable);
    TableInfo tableInfo = TableInfo.createForPartitionRename(db.getId(), olapTable.getId(), partition.getId(),
            newPartitionName);
    editLog.logPartitionRename(tableInfo);
    LOG.info("rename partition[{}] to {}", partitionName, newPartitionName);
}
/**
 * Edit-log replay counterpart of {@link #renamePartition}: applies the rename and
 * deactivates dependent MVs under the db write lock. Writes no new log entry.
 */
public void replayRenamePartition(TableInfo tableInfo) {
    long dbId = tableInfo.getDbId();
    long tableId = tableInfo.getTableId();
    long partitionId = tableInfo.getPartitionId();
    String newPartitionName = tableInfo.getNewPartitionName();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableId);
        Partition partition = table.getPartition(partitionId);
        table.renamePartition(partition.getName(), newPartitionName);
        disableMaterializedView(db, table);
        LOG.info("replay rename partition[{}] to {}", partition.getName(), newPartitionName);
    } finally {
        db.writeUnlock();
    }
}
/**
 * Renames a rollup index of an OLAP table by re-keying it in the table's
 * index-name map, then writes a rollup-rename edit-log record.
 *
 * @throws DdlException if the table is mid-alter, the rollup name equals the table
 *                      name (use table rename for that), the name is unchanged, the
 *                      rollup is missing, or the new name is taken
 */
public void renameRollup(Database db, OlapTable table, RollupRenameClause renameClause) throws DdlException {
    if (table.getState() != OlapTable.OlapTableState.NORMAL) {
        throw new DdlException("Table[" + table.getName() + "] is under " + table.getState());
    }
    String rollupName = renameClause.getRollupName();
    // Renaming the base index is the same as renaming the table itself.
    if (rollupName.equals(table.getName())) {
        throw new DdlException("Using ALTER TABLE RENAME to change table name");
    }
    String newRollupName = renameClause.getNewRollupName();
    if (rollupName.equals(newRollupName)) {
        throw new DdlException("Same rollup name");
    }
    Map<String, Long> indexNameToIdMap = table.getIndexNameToId();
    if (indexNameToIdMap.get(rollupName) == null) {
        throw new DdlException("Rollup index[" + rollupName + "] does not exists");
    }
    if (indexNameToIdMap.get(newRollupName) != null) {
        throw new DdlException("Rollup name[" + newRollupName + "] is already used");
    }
    long indexId = indexNameToIdMap.remove(rollupName);
    indexNameToIdMap.put(newRollupName, indexId);
    TableInfo tableInfo = TableInfo.createForRollupRename(db.getId(), table.getId(), indexId, newRollupName);
    editLog.logRollupRename(tableInfo);
    LOG.info("rename rollup[{}] to {}", rollupName, newRollupName);
}
/**
 * Edit-log replay counterpart of {@link #renameRollup}: re-keys the rollup index under
 * the db write lock. Writes no new log entry.
 */
public void replayRenameRollup(TableInfo tableInfo) {
    long dbId = tableInfo.getDbId();
    long tableId = tableInfo.getTableId();
    long indexId = tableInfo.getIndexId();
    String newRollupName = tableInfo.getNewRollupName();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable table = (OlapTable) db.getTable(tableId);
        String rollupName = table.getIndexNameById(indexId);
        Map<String, Long> indexNameToIdMap = table.getIndexNameToId();
        indexNameToIdMap.remove(rollupName);
        indexNameToIdMap.put(newRollupName, indexId);
        LOG.info("replay rename rollup[{}] to {}", rollupName, newRollupName);
    } finally {
        db.writeUnlock();
    }
}
/**
 * Column rename is not supported by this metastore implementation.
 *
 * @throws DdlException always
 */
public void renameColumn(Database db, OlapTable table, ColumnRenameClause renameClause) throws DdlException {
    // Fixed message typo: "implmented" -> "implemented".
    throw new DdlException("not implemented");
}
/**
 * Replay of a column rename is not supported (the forward path never logs one).
 *
 * @throws DdlException always
 */
public void replayRenameColumn(TableInfo tableInfo) throws DdlException {
    // Fixed message typo: "implmented" -> "implemented".
    throw new DdlException("not implemented");
}
/**
 * Applies dynamic-partition properties to a table, (de)registers it with the dynamic
 * partition scheduler, and writes the property change to the edit log.
 *
 * @throws DdlException if the dynamic-partition properties fail validation
 */
public void modifyTableDynamicPartition(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    // Snapshot the incoming properties before analysis may consume/mutate them,
    // so the edit log records exactly what the user supplied.
    Map<String, String> logProperties = new HashMap<>(properties);
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(table, properties);
    } else {
        Map<String, String> analyzedDynamicPartition = DynamicPartitionUtil.analyzeDynamicPartition(properties);
        tableProperty.modifyTableProperties(analyzedDynamicPartition);
        tableProperty.buildDynamicProperty();
    }
    DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), table);
    stateMgr.getDynamicPartitionScheduler().createOrUpdateRuntimeInfo(
            table.getName(), DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime());
    ModifyTablePropertyOperationLog info =
            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties);
    editLog.logDynamicPartition(info);
}
/**
 * Set replication number for an unpartitioned table.
 * ATTN: only for unpartitioned table now; range-partitioned tables must use
 * MODIFY PARTITION (or the "default.replication_num" property).
 *
 * Requires the caller to hold the db write lock. Logs a ModifyPartitionInfo record.
 *
 * @param db         database owning the table
 * @param table      unpartitioned OLAP table to modify
 * @param properties must contain the "replication_num" property
 * @throws DdlException if the table is colocated or range-partitioned, or the
 *                      implicit partition cannot be found
 */
public void modifyTableReplicationNum(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    // Colocation groups pin replica placement; changing replication would break them.
    if (colocateTableIndex.isColocateTable(table.getId())) {
        throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum");
    }
    String defaultReplicationNumName = "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM;
    PartitionInfo partitionInfo = table.getPartitionInfo();
    if (partitionInfo.getType() == PartitionType.RANGE) {
        throw new DdlException(
                "This is a range partitioned table, you should specify partitions with MODIFY PARTITION clause." +
                        " If you want to set default replication number, please use '" + defaultReplicationNumName +
                        "' instead of '" + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM + "' to escape misleading.");
    }
    // For unpartitioned tables the single implicit partition shares the table's name.
    String partitionName = table.getName();
    Partition partition = table.getPartition(partitionName);
    if (partition == null) {
        throw new DdlException("Partition does not exist. name: " + partitionName);
    }
    short replicationNum = Short.parseShort(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM));
    boolean isInMemory = partitionInfo.getIsInMemory(partition.getId());
    DataProperty newDataProperty = partitionInfo.getDataProperty(partition.getId());
    partitionInfo.setReplicationNum(partition.getId(), replicationNum);
    table.setReplicationNum(replicationNum);
    ModifyPartitionInfo info = new ModifyPartitionInfo(db.getId(), table.getId(), partition.getId(),
            newDataProperty, replicationNum, isInMemory);
    editLog.logModifyPartition(info);
    LOG.info("modify partition[{}-{}-{}] replication num to {}", db.getOriginName(), table.getName(),
            partition.getName(), replicationNum);
}
/**
 * Set the default replication number for a table (visible in SHOW CREATE TABLE).
 * For unpartitioned tables the implicit partition's replication number is updated
 * as well, keeping it consistent with the table-level default.
 *
 * Requires the caller to hold the db write lock. Logs the property change.
 *
 * @throws DdlException if the table is colocated or the implicit partition is missing
 */
public void modifyTableDefaultReplicationNum(Database db, OlapTable table, Map<String, String> properties)
        throws DdlException {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    if (colocateTableIndex.isColocateTable(table.getId())) {
        throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum");
    }
    PartitionInfo partitionInfo = table.getPartitionInfo();
    Partition partition = null;
    boolean isUnpartitionedTable = false;
    if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
        isUnpartitionedTable = true;
        // The single implicit partition shares the table's name.
        String partitionName = table.getName();
        partition = table.getPartition(partitionName);
        if (partition == null) {
            throw new DdlException("Partition does not exist. name: " + partitionName);
        }
    }
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildReplicationNum();
    if (isUnpartitionedTable) {
        Preconditions.checkNotNull(partition);
        partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum());
    }
    ModifyTablePropertyOperationLog info =
            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
    editLog.logModifyReplicationNum(info);
    LOG.info("modify table[{}] replication num to {}", table.getName(),
            properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM));
}
/**
 * Applies the enable-persistent-index property to a table and logs the change.
 * Requires the caller to hold the db write lock.
 */
public void modifyTableEnablePersistentIndexMeta(Database db, OlapTable table, Map<String, String> properties) {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildEnablePersistentIndex();
    ModifyTablePropertyOperationLog info =
            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
    editLog.logModifyEnablePersistentIndex(info);
}
/**
 * Applies the in-memory property to a table, propagates it to every existing
 * partition, and logs the change. Requires the caller to hold the db write lock.
 */
public void modifyTableInMemoryMeta(Database db, OlapTable table, Map<String, String> properties) {
    Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
    TableProperty tableProperty = table.getTableProperty();
    if (tableProperty == null) {
        tableProperty = new TableProperty(properties);
        table.setTableProperty(tableProperty);
    } else {
        tableProperty.modifyTableProperties(properties);
    }
    tableProperty.buildInMemory();
    // Keep per-partition flags consistent with the new table-level setting.
    for (Partition partition : table.getPartitions()) {
        table.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory());
    }
    ModifyTablePropertyOperationLog info =
            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
    editLog.logModifyInMemory(info);
}
/**
 * Dispatches a tablet-meta property change to the matching handler. Unknown meta
 * types are silently ignored, matching the original behavior.
 */
public void modifyTableMeta(Database db, OlapTable table, Map<String, String> properties,
                            TTabletMetaType metaType) {
    if (metaType == TTabletMetaType.INMEMORY) {
        modifyTableInMemoryMeta(db, table, properties);
        return;
    }
    if (metaType == TTabletMetaType.ENABLE_PERSISTENT_INDEX) {
        modifyTableEnablePersistentIndexMeta(db, table, properties);
    }
}
public void setHasForbitGlobalDict(String dbName, String tableName, boolean isForbit) throws DdlException {
Map<String, String> property = new HashMap<>();
Database db = getDb(dbName);
if (db == null) {
throw new DdlException("the DB " + dbName + " is not exist");
}
db.readLock();
try {
Table table = db.getTable(tableName);
if (table == null) {
throw new DdlException("the DB " + dbName + " table: " + tableName + "isn't exist");
}
if (table instanceof OlapTable) {
OlapTable olapTable = (OlapTable) table;
olapTable.setHasForbitGlobalDict(isForbit);
if (isForbit) {
property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.DISABLE_LOW_CARD_DICT);
IDictManager.getInstance().disableGlobalDict(olapTable.getId());
} else {
property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.ABLE_LOW_CARD_DICT);
IDictManager.getInstance().enableGlobalDict(olapTable.getId());
}
ModifyTablePropertyOperationLog info =
new ModifyTablePropertyOperationLog(db.getId(), table.getId(), property);
editLog.logSetHasForbitGlobalDict(info);
}
} finally {
db.readUnlock();
}
}
/**
 * Edit-log replay: replaces the full schema of an external Hive table with the
 * columns recorded in the log entry. A no-op when the entry carries no db name.
 */
public void replayModifyHiveTableColumn(short opCode, ModifyTableColumnOperationLog info) {
    if (info.getDbName() == null) {
        return;
    }
    String hiveExternalDb = info.getDbName();
    String hiveExternalTable = info.getTableName();
    LOG.info("replayModifyTableColumn hiveDb:{},hiveTable:{}", hiveExternalDb, hiveExternalTable);
    List<Column> columns = info.getColumns();
    Database db = getDb(hiveExternalDb);
    HiveTable table;
    db.writeLock();
    try {
        Table tbl = db.getTable(hiveExternalTable);
        table = (HiveTable) tbl;
        table.setNewFullSchema(columns);
    } finally {
        db.writeUnlock();
    }
}
/**
 * Edit-log replay for table-property changes, dispatched on the operation code.
 *
 * OP_SET_FORBIT_GLOBAL_DICT toggles the low-cardinality dictionary flag; all other
 * codes rebuild the table property and then apply code-specific side effects:
 * in-memory flags are pushed to every partition, replication num is pushed to the
 * implicit partition of unpartitioned tables, and persistent-index is set on the table.
 */
public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) {
    long dbId = info.getDbId();
    long tableId = info.getTableId();
    Map<String, String> properties = info.getProperties();
    Database db = getDb(dbId);
    db.writeLock();
    try {
        OlapTable olapTable = (OlapTable) db.getTable(tableId);
        if (opCode == OperationType.OP_SET_FORBIT_GLOBAL_DICT) {
            String enAble = properties.get(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE);
            Preconditions.checkState(enAble != null);
            // Tolerate a dropped table here; the dict-flag replay is best-effort.
            if (olapTable != null) {
                if (enAble.equals(PropertyAnalyzer.DISABLE_LOW_CARD_DICT)) {
                    olapTable.setHasForbitGlobalDict(true);
                    IDictManager.getInstance().disableGlobalDict(olapTable.getId());
                } else {
                    olapTable.setHasForbitGlobalDict(false);
                    IDictManager.getInstance().enableGlobalDict(olapTable.getId());
                }
            }
        } else {
            // NOTE(review): unlike the dict branch, this path assumes olapTable is
            // non-null -- presumably guaranteed by replay ordering; confirm.
            TableProperty tableProperty = olapTable.getTableProperty();
            if (tableProperty == null) {
                tableProperty = new TableProperty(properties);
                olapTable.setTableProperty(tableProperty.buildProperty(opCode));
            } else {
                tableProperty.modifyTableProperties(properties);
                tableProperty.buildProperty(opCode);
            }
            if (opCode == OperationType.OP_MODIFY_IN_MEMORY) {
                for (Partition partition : olapTable.getPartitions()) {
                    olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory());
                }
            } else if (opCode == OperationType.OP_MODIFY_REPLICATION_NUM) {
                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
                if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
                    String partitionName = olapTable.getName();
                    Partition partition = olapTable.getPartition(partitionName);
                    if (partition != null) {
                        partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum());
                    }
                }
            } else if (opCode == OperationType.OP_MODIFY_ENABLE_PERSISTENT_INDEX) {
                olapTable.setEnablePersistentIndex(tableProperty.enablePersistentIndex());
            }
        }
    } finally {
        db.writeUnlock();
    }
}
/**
 * Creates a logical view: validates name uniqueness, builds and initializes the View
 * object from the inline definition (capturing the session's SQL mode), then registers
 * it in the database under the global state lock.
 *
 * @throws DdlException if the db is missing, the name collides (without IF NOT EXISTS),
 *                      the view definition fails to initialize, or locking fails
 */
@Override
public void createView(CreateViewStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTable();
    Database db = this.getDb(stmt.getDbName());
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    // Advisory pre-check; the authoritative check is createTableWithLock below.
    db.readLock();
    try {
        if (db.getTable(tableName) != null) {
            if (stmt.isSetIfNotExists()) {
                LOG.info("create view[{}] which already exists", tableName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
            }
        }
    } finally {
        db.readUnlock();
    }
    List<Column> columns = stmt.getColumns();
    long tableId = getNextId();
    View newView = new View(tableId, tableName, columns);
    newView.setComment(stmt.getComment());
    // SQL mode is captured so the stored definition parses the same way later.
    newView.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(),
            ConnectContext.get().getSessionVariable().getSqlMode());
    try {
        newView.init();
    } catch (UserException e) {
        throw new DdlException("failed to init view stmt", e);
    }
    if (!tryLock(false)) {
        throw new DdlException("Failed to acquire globalStateMgr lock. Try again");
    }
    try {
        if (getDb(db.getId()) == null) {
            throw new DdlException("database has been dropped when creating view");
        }
        if (!db.createTableWithLock(newView, false)) {
            if (!stmt.isSetIfNotExists()) {
                ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
            } else {
                LOG.info("create table[{}] which already exists", tableName);
                return;
            }
        }
    } finally {
        unlock();
    }
    LOG.info("successfully create view[" + tableName + "-" + newView.getId() + "]");
}
public void replayCreateCluster(Cluster cluster) {
tryLock(true);
try {
unprotectCreateCluster(cluster);
} finally {
unlock();
}
}
private void unprotectCreateCluster(Cluster cluster) {
Preconditions.checkState(cluster.isDefaultCluster(), "Cluster must be default cluster");
Preconditions.checkState(cluster.isEmpty(), "Cluster backendIdList must be 0");
defaultCluster = cluster;
final InfoSchemaDb infoDb = new InfoSchemaDb();
unprotectCreateDb(infoDb);
stateMgr.setIsDefaultClusterCreated(true);
}
public Cluster getCluster() {
return defaultCluster;
}
public long loadCluster(DataInputStream dis, long checksum) throws IOException {
if (GlobalStateMgr.getCurrentStateJournalVersion() >= FeMetaVersion.VERSION_30) {
int clusterCount = dis.readInt();
checksum ^= clusterCount;
for (long i = 0; i < clusterCount; ++i) {
final Cluster cluster = Cluster.read(dis);
checksum ^= cluster.getId();
Preconditions.checkState(cluster.isDefaultCluster(), "Cluster must be default_cluster");
List<Long> latestBackendIds = stateMgr.getClusterInfo().getBackendIds();
cluster.setBackendIdList(latestBackendIds);
String dbName = InfoSchemaDb.getFullInfoSchemaDbName();
InfoSchemaDb db;
if (getFullNameToDb().containsKey(dbName)) {
db = (InfoSchemaDb) GlobalStateMgr.getCurrentState().getFullNameToDb().get(dbName);
} else {
db = new InfoSchemaDb();
}
String errMsg = "InfoSchemaDb id shouldn't larger than 10000, please restart your FE server";
Preconditions.checkState(db.getId() < NEXT_ID_INIT_VALUE, errMsg);
idToDb.put(db.getId(), db);
fullNameToDb.put(db.getFullName(), db);
cluster.addDb(dbName, db.getId());
defaultCluster = cluster;
}
}
LOG.info("finished replay cluster from image");
return checksum;
}
public void initDefaultCluster() {
final List<Long> backendList = Lists.newArrayList();
final List<Backend> defaultClusterBackends = systemInfoService.getBackends();
for (Backend backend : defaultClusterBackends) {
backendList.add(backend.getId());
}
final long id = getNextId();
final Cluster cluster = new Cluster(SystemInfoService.DEFAULT_CLUSTER, id);
Set<String> beHost = Sets.newHashSet();
for (Backend be : defaultClusterBackends) {
if (beHost.contains(be.getHost())) {
LOG.error("found more than one backends in same host: {}", be.getHost());
System.exit(-1);
} else {
beHost.add(be.getHost());
}
}
cluster.setBackendIdList(backendList);
unprotectCreateCluster(cluster);
for (Database db : idToDb.values()) {
cluster.addDb(db.getFullName(), db.getId());
}
stateMgr.setIsDefaultClusterCreated(true);
editLog.logCreateCluster(cluster);
}
public long saveCluster(DataOutputStream dos, long checksum) throws IOException {
final int clusterCount = 1;
checksum ^= clusterCount;
dos.writeInt(clusterCount);
Cluster cluster = defaultCluster;
long clusterId = defaultCluster.getId();
if (clusterId >= NEXT_ID_INIT_VALUE) {
checksum ^= clusterId;
cluster.write(dos);
}
return checksum;
}
public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) {
for (long id : info.getBackendList()) {
final Backend backend = stateMgr.getClusterInfo().getBackend(id);
final Cluster cluster = defaultCluster;
cluster.removeBackend(id);
backend.setDecommissioned(false);
backend.clearClusterName();
backend.setBackendState(Backend.BackendState.free);
}
}
/*
* Truncate specified table or partitions.
* The main idea is:
*
* 1. using the same schema to create new table(partitions)
* 2. use the new created table(partitions) to replace the old ones.
*
* if no partition specified, it will truncate all partitions of this table, including all temp partitions,
* otherwise, it will only truncate those specified partitions.
*
*/
@Override
public void truncateTable(TruncateTableStmt truncateTableStmt) throws DdlException {
TableRef tblRef = truncateTableStmt.getTblRef();
TableName dbTbl = tblRef.getName();
Map<String, Long> origPartitions = Maps.newHashMap();
OlapTable copiedTbl;
Database db = getDb(dbTbl.getDb());
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbTbl.getDb());
}
boolean truncateEntireTable = tblRef.getPartitionNames() == null;
db.readLock();
try {
Table table = db.getTable(dbTbl.getTbl());
if (table == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, dbTbl.getTbl());
}
if (!table.isOlapOrLakeTable()) {
throw new DdlException("Only support truncate OLAP table or LAKE table");
}
OlapTable olapTable = (OlapTable) table;
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
throw new DdlException("Table' state is not NORMAL: " + olapTable.getState());
}
if (!truncateEntireTable) {
for (String partName : tblRef.getPartitionNames().getPartitionNames()) {
Partition partition = olapTable.getPartition(partName);
if (partition == null) {
throw new DdlException("Partition " + partName + " does not exist");
}
origPartitions.put(partName, partition.getId());
}
} else {
for (Partition partition : olapTable.getPartitions()) {
origPartitions.put(partition.getName(), partition.getId());
}
}
copiedTbl = olapTable.selectiveCopy(origPartitions.keySet(), true, MaterializedIndex.IndexExtState.VISIBLE);
} finally {
db.readUnlock();
}
List<Partition> newPartitions = Lists.newArrayListWithCapacity(origPartitions.size());
Set<Long> tabletIdSet = Sets.newHashSet();
try {
for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
long oldPartitionId = entry.getValue();
long newPartitionId = getNextId();
String newPartitionName = entry.getKey();
PartitionInfo partitionInfo = copiedTbl.getPartitionInfo();
partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(oldPartitionId));
partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(oldPartitionId));
partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(oldPartitionId));
partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(oldPartitionId));
if (copiedTbl.isLakeTable()) {
partitionInfo.setStorageInfo(newPartitionId, partitionInfo.getStorageInfo(oldPartitionId));
}
Partition newPartition =
createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet);
newPartitions.add(newPartition);
}
buildPartitions(db, copiedTbl, newPartitions);
} catch (DdlException e) {
deleteUselessTabletAndShard(tabletIdSet, copiedTbl);
throw e;
}
Preconditions.checkState(origPartitions.size() == newPartitions.size());
db.writeLock();
try {
OlapTable olapTable = (OlapTable) db.getTable(copiedTbl.getId());
if (olapTable == null) {
throw new DdlException("Table[" + copiedTbl.getName() + "] is dropped");
}
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
throw new DdlException("Table' state is not NORMAL: " + olapTable.getState());
}
for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
Partition partition = copiedTbl.getPartition(entry.getValue());
if (partition == null || !partition.getName().equalsIgnoreCase(entry.getKey())) {
throw new DdlException("Partition [" + entry.getKey() + "] is changed");
}
}
boolean metaChanged = false;
if (olapTable.getIndexNameToId().size() != copiedTbl.getIndexNameToId().size()) {
metaChanged = true;
} else {
Map<Long, Integer> copiedIndexIdToSchemaHash = copiedTbl.getIndexIdToSchemaHash();
for (Map.Entry<Long, Integer> entry : olapTable.getIndexIdToSchemaHash().entrySet()) {
long indexId = entry.getKey();
if (!copiedIndexIdToSchemaHash.containsKey(indexId)) {
metaChanged = true;
break;
}
if (!copiedIndexIdToSchemaHash.get(indexId).equals(entry.getValue())) {
metaChanged = true;
break;
}
}
}
if (metaChanged) {
throw new DdlException("Table[" + copiedTbl.getName() + "]'s meta has been changed. try again.");
}
truncateTableInternal(olapTable, newPartitions, truncateEntireTable, false);
TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions,
truncateEntireTable);
editLog.logTruncateTable(info);
Set<Long> relatedMvs = olapTable.getRelatedMaterializedViews();
for (long mvId : relatedMvs) {
MaterializedView materializedView = (MaterializedView) db.getTable(mvId);
if (materializedView.isLoadTriggeredRefresh()) {
refreshMaterializedView(db.getFullName(), db.getTable(mvId).getName(),
Constants.TaskRunPriority.NORMAL.value());
}
}
} catch (DdlException e) {
deleteUselessTabletAndShard(tabletIdSet, copiedTbl);
throw e;
} catch (MetaNotFoundException e) {
LOG.warn("Table related materialized view can not be found", e);
} finally {
db.writeUnlock();
}
LOG.info("finished to truncate table {}, partitions: {}",
tblRef.getName().toSql(), tblRef.getPartitionNames());
}
private void deleteUselessTabletAndShard(Set<Long> tabletIdSet, OlapTable olapTable) {
for (Long tabletId : tabletIdSet) {
GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
}
if (olapTable.isLakeTable() && !tabletIdSet.isEmpty()) {
stateMgr.getShardManager().getShardDeleter().addUnusedShardId(tabletIdSet);
editLog.logAddUnusedShard(tabletIdSet);
}
}
private void truncateTableInternal(OlapTable olapTable, List<Partition> newPartitions,
boolean isEntireTable, boolean isReplay) {
Set<Long> oldTabletIds = Sets.newHashSet();
for (Partition newPartition : newPartitions) {
Partition oldPartition = olapTable.replacePartition(newPartition);
for (MaterializedIndex index : oldPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
index.getTablets().stream().forEach(t -> {
oldTabletIds.add(t.getId());
});
}
}
if (isEntireTable) {
olapTable.dropAllTempPartitions();
}
for (Long tabletId : oldTabletIds) {
GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
}
if (olapTable.isLakeTable() && !isReplay) {
stateMgr.getShardManager().getShardDeleter().addUnusedShardId(oldTabletIds);
editLog.logAddUnusedShard(oldTabletIds);
}
}
public void replayTruncateTable(TruncateTableInfo info) {
Database db = getDb(info.getDbId());
db.writeLock();
try {
OlapTable olapTable = (OlapTable) db.getTable(info.getTblId());
truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable(), true);
if (!GlobalStateMgr.isCheckpointThread()) {
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
for (Partition partition : info.getPartitions()) {
long partitionId = partition.getId();
TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
partitionId).getStorageMedium();
for (MaterializedIndex mIndex : partition.getMaterializedIndices(
MaterializedIndex.IndexExtState.ALL)) {
long indexId = mIndex.getId();
int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(),
partitionId, indexId, schemaHash, medium, olapTable.isLakeTable());
for (Tablet tablet : mIndex.getTablets()) {
long tabletId = tablet.getId();
invertedIndex.addTablet(tabletId, tabletMeta);
if (olapTable.isOlapTable()) {
for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
invertedIndex.addReplica(tabletId, replica);
}
}
}
}
}
}
} finally {
db.writeUnlock();
}
}
public void replayBackendTabletsInfo(BackendTabletsInfo backendTabletsInfo) {
List<Pair<Long, Integer>> tabletsWithSchemaHash = backendTabletsInfo.getTabletSchemaHash();
if (!tabletsWithSchemaHash.isEmpty()) {
for (Pair<Long, Integer> tabletInfo : tabletsWithSchemaHash) {
LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first);
}
return;
}
List<ReplicaPersistInfo> replicaPersistInfos = backendTabletsInfo.getReplicaPersistInfos();
for (ReplicaPersistInfo info : replicaPersistInfos) {
long dbId = info.getDbId();
Database db = getDb(dbId);
if (db == null) {
continue;
}
db.writeLock();
try {
OlapTable tbl = (OlapTable) db.getTable(info.getTableId());
if (tbl == null) {
continue;
}
Partition partition = tbl.getPartition(info.getPartitionId());
if (partition == null) {
continue;
}
MaterializedIndex mindex = partition.getIndex(info.getIndexId());
if (mindex == null) {
continue;
}
LocalTablet tablet = (LocalTablet) mindex.getTablet(info.getTabletId());
if (tablet == null) {
continue;
}
Replica replica = tablet.getReplicaById(info.getReplicaId());
if (replica != null) {
replica.setBad(true);
LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying",
info.getReplicaId(), info.getTabletId(), info.getBackendId());
}
} finally {
db.writeUnlock();
}
}
}
public void convertDistributionType(Database db, OlapTable tbl) throws DdlException {
db.writeLock();
try {
if (!tbl.convertRandomDistributionToHashDistribution()) {
throw new DdlException("Table " + tbl.getName() + " is not random distributed");
}
TableInfo tableInfo = TableInfo.createForModifyDistribution(db.getId(), tbl.getId());
editLog.logModifyDistributionType(tableInfo);
LOG.info("finished to modify distribution type of table: " + tbl.getName());
} finally {
db.writeUnlock();
}
}
public void replayConvertDistributionType(TableInfo tableInfo) {
Database db = getDb(tableInfo.getDbId());
db.writeLock();
try {
OlapTable tbl = (OlapTable) db.getTable(tableInfo.getTableId());
tbl.convertRandomDistributionToHashDistribution();
LOG.info("replay modify distribution type of table: " + tbl.getName());
} finally {
db.writeUnlock();
}
}
/*
* The entry of replacing partitions with temp partitions.
*/
public void replaceTempPartition(Database db, String tableName, ReplacePartitionClause clause) throws DdlException {
List<String> partitionNames = clause.getPartitionNames();
List<String> tempPartitionNames =
clause.getTempPartitionNames().stream().distinct().collect(Collectors.toList());
boolean isStrictRange = clause.isStrictRange();
boolean useTempPartitionName = clause.useTempPartitionName();
db.writeLock();
try {
Table table = db.getTable(tableName);
if (table == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
}
if (!table.isOlapOrLakeTable()) {
throw new DdlException("Table[" + tableName + "] is not OLAP table or LAKE table");
}
OlapTable olapTable = (OlapTable) table;
for (String partName : partitionNames) {
if (!olapTable.checkPartitionNameExist(partName, false)) {
throw new DdlException("Partition[" + partName + "] does not exist");
}
}
for (String partName : tempPartitionNames) {
if (!olapTable.checkPartitionNameExist(partName, true)) {
throw new DdlException("Temp partition[" + partName + "] does not exist");
}
}
olapTable.replaceTempPartitions(partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName);
ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), olapTable.getId(),
partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName);
editLog.logReplaceTempPartition(info);
LOG.info("finished to replace partitions {} with temp partitions {} from table: {}",
clause.getPartitionNames(), clause.getTempPartitionNames(), tableName);
} finally {
db.writeUnlock();
}
}
public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) {
Database db = getDb(replaceTempPartitionLog.getDbId());
if (db == null) {
return;
}
db.writeLock();
try {
OlapTable olapTable = (OlapTable) db.getTable(replaceTempPartitionLog.getTblId());
if (olapTable == null) {
return;
}
olapTable.replaceTempPartitions(replaceTempPartitionLog.getPartitions(),
replaceTempPartitionLog.getTempPartitions(),
replaceTempPartitionLog.isStrictRange(),
replaceTempPartitionLog.useTempPartitionName());
} catch (DdlException e) {
LOG.warn("should not happen.", e);
} finally {
db.writeUnlock();
}
}
public void checkTablets(AdminCheckTabletsStmt stmt) {
AdminCheckTabletsStmt.CheckType type = stmt.getType();
if (type == AdminCheckTabletsStmt.CheckType.CONSISTENCY) {
stateMgr.getConsistencyChecker().addTabletsToCheck(stmt.getTabletIds());
}
}
public void setReplicaStatus(AdminSetReplicaStatusStmt stmt) {
long tabletId = stmt.getTabletId();
long backendId = stmt.getBackendId();
Replica.ReplicaStatus status = stmt.getStatus();
setReplicaStatusInternal(tabletId, backendId, status, false);
}
public void replaySetReplicaStatus(SetReplicaStatusOperationLog log) {
setReplicaStatusInternal(log.getTabletId(), log.getBackendId(), log.getReplicaStatus(), true);
}
private void setReplicaStatusInternal(long tabletId, long backendId, Replica.ReplicaStatus status,
boolean isReplay) {
TabletMeta meta = stateMgr.getTabletInvertedIndex().getTabletMeta(tabletId);
if (meta == null) {
LOG.info("tablet {} does not exist", tabletId);
return;
}
long dbId = meta.getDbId();
Database db = getDb(dbId);
if (db == null) {
LOG.info("database {} of tablet {} does not exist", dbId, tabletId);
return;
}
db.writeLock();
try {
Replica replica = stateMgr.getTabletInvertedIndex().getReplica(tabletId, backendId);
if (replica == null) {
LOG.info("replica of tablet {} does not exist", tabletId);
return;
}
if (status == Replica.ReplicaStatus.BAD || status == Replica.ReplicaStatus.OK) {
if (replica.setBadForce(status == Replica.ReplicaStatus.BAD)) {
if (!isReplay) {
SetReplicaStatusOperationLog log =
new SetReplicaStatusOperationLog(backendId, tabletId, status);
editLog.logSetReplicaStatus(log);
}
LOG.info("set replica {} of tablet {} on backend {} as {}. is replay: {}",
replica.getId(), tabletId, backendId, status, isReplay);
}
}
} finally {
db.writeUnlock();
}
}
public void onEraseDatabase(long dbId) {
stateMgr.getGlobalTransactionMgr().removeDatabaseTransactionMgr(dbId);
}
public void onEraseTable(@NotNull OlapTable olapTable) {
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
Collection<Partition> allPartitions = olapTable.getAllPartitions();
for (Partition partition : allPartitions) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
for (Tablet tablet : index.getTablets()) {
invertedIndex.deleteTablet(tablet.getId());
}
}
}
colocateTableIndex.removeTable(olapTable.getId());
}
public Set<Long> onErasePartition(Partition partition) {
Set<Long> tabletIdSet = new HashSet<Long>();
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
for (Tablet tablet : index.getTablets()) {
long tabletId = tablet.getId();
TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
if (tabletMeta != null && tabletMeta.isLakeTablet()) {
tabletIdSet.add(tabletId);
}
invertedIndex.deleteTablet(tabletId);
}
}
return tabletIdSet;
}
@VisibleForTesting
public void clear() {
if (idToDb != null) {
idToDb.clear();
}
if (fullNameToDb != null) {
fullNameToDb.clear();
}
System.gc();
}
@VisibleForTesting
public OlapTable getCopiedTable(Database db, OlapTable olapTable, List<Long> sourcePartitionIds,
Map<Long, String> origPartitions) {
OlapTable copiedTbl;
db.readLock();
try {
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
throw new RuntimeException("Table' state is not NORMAL: " + olapTable.getState()
+ ", tableId:" + olapTable.getId() + ", tabletName:" + olapTable.getName());
}
for (Long id : sourcePartitionIds) {
origPartitions.put(id, olapTable.getPartition(id).getName());
}
copiedTbl = olapTable.selectiveCopy(origPartitions.values(), true, MaterializedIndex.IndexExtState.VISIBLE);
} finally {
db.readUnlock();
}
return copiedTbl;
}
@VisibleForTesting
public List<Partition> getNewPartitionsFromPartitions(Database db, OlapTable olapTable, List<Long> sourcePartitionIds,
Map<Long, String> origPartitions, OlapTable copiedTbl,
String namePostfix, Set<Long> tabletIdSet, List<Long> tmpPartitionIds)
throws DdlException {
List<Partition> newPartitions = Lists.newArrayListWithCapacity(sourcePartitionIds.size());
for (int i = 0; i < sourcePartitionIds.size(); ++i) {
long newPartitionId = tmpPartitionIds.get(i);
long sourcePartitionId = sourcePartitionIds.get(i);
String newPartitionName = origPartitions.get(sourcePartitionId) + namePostfix;
if (olapTable.checkPartitionNameExist(newPartitionName, true)) {
LOG.warn("partition:{} already exists in table:{}", newPartitionName, olapTable.getName());
continue;
}
PartitionInfo partitionInfo = copiedTbl.getPartitionInfo();
partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(sourcePartitionId));
partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(sourcePartitionId));
partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(sourcePartitionId));
partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(sourcePartitionId));
Partition newPartition =
createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet);
newPartitions.add(newPartition);
}
return newPartitions;
}
public List<Partition> createTempPartitionsFromPartitions(Database db, Table table,
String namePostfix, List<Long> sourcePartitionIds,
List<Long> tmpPartitionIds) {
Preconditions.checkState(table instanceof OlapTable);
OlapTable olapTable = (OlapTable) table;
Map<Long, String> origPartitions = Maps.newHashMap();
OlapTable copiedTbl = getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions);
List<Partition> newPartitions = null;
Set<Long> tabletIdSet = Sets.newHashSet();
try {
newPartitions = getNewPartitionsFromPartitions(db, olapTable, sourcePartitionIds, origPartitions,
copiedTbl, namePostfix, tabletIdSet, tmpPartitionIds);
buildPartitions(db, copiedTbl, newPartitions);
} catch (Exception e) {
for (Long tabletId : tabletIdSet) {
GlobalStateMgr.getCurrentInvertedIndex().deleteTablet(tabletId);
}
LOG.warn("create partitions from partitions failed.", e);
throw new RuntimeException("create partitions failed", e);
}
return newPartitions;
}
} |
why is this one AND inside WEAKAND? | public void container_and_referenced_content() {
try (Application application =
Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) {
Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"),
new Query("?query=substring:foobar&timeout=20000"));
assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)",
result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString());
}
} | assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", | public void container_and_referenced_content() {
try (Application application =
Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) {
Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"),
new Query("?query=substring:foobar&timeout=20000"));
assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)",
result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString());
}
} | class ApplicationTest {
@Test
public void minimal_application_can_be_constructed() {
try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) {
Application unused = application;
}
}
/** Tests that an application with search chains referencing a content cluster can be constructed. */
@Test
private void printTrace(Result result) {
for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class))
System.out.println(message);
}
@Test
public void empty_container() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) {
try {
app.process(new DocumentRemove(null));
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.process(new Processing());
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.search(new Query("?foo"));
fail("expected exception");
} catch (Exception ignore) {
}
}
}
@Test
public void config() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.documentProcessor("docproc", "default", MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder()
.mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2"))
.mylist("item1")
.mylist("item2")
.mymap("key1", "value1")
.mymap("key2", "value2")
.mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1"))
.mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2")))))))
) {
MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default");
assertNotNull(docproc);
assertEquals(docproc.getConfig().mystruct().id(), "structid");
assertEquals(docproc.getConfig().mystruct().value(), "structvalue");
assertEquals(docproc.getConfig().mystructlist().size(), 2);
assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1");
assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1");
assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2");
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2");
assertEquals(docproc.getConfig().mylist().size(), 2);
assertEquals(docproc.getConfig().mylist().get(0), "item1");
assertEquals(docproc.getConfig().mylist().get(1), "item2");
assertEquals(docproc.getConfig().mymap().size(), 2);
assertTrue(docproc.getConfig().mymap().containsKey("key1"));
assertEquals(docproc.getConfig().mymap().get("key1"), "value1");
assertTrue(docproc.getConfig().mymap().containsKey("key2"));
assertEquals(docproc.getConfig().mymap().get("key2"), "value2");
assertEquals(docproc.getConfig().mymapstruct().size(), 2);
assertTrue(docproc.getConfig().mymapstruct().containsKey("key1"));
assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1");
assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1");
assertTrue(docproc.getConfig().mymapstruct().containsKey("key2"));
assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2");
assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2");
}
}
@Test
public void handler() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.handler("http:
) {
RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName());
assertNotNull(handler);
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
}
}
@Test
public void renderer() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.renderer("mock", MockRenderer.class))))
) {
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />");
}
}
@Test
public void search_default() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher(MockSearcher.class))))
) {
Result result = app.search(new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void search() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher("foo", MockSearcher.class))))
) {
Result result = app.search("foo", new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void document_type() throws Exception {
try (
Application app = Application.fromBuilder(new Application.Builder()
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.documentProcessor(MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))))))
) {
Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes();
assertNotNull(typeMap);
assertTrue(typeMap.containsKey("test"));
}
}
@Test
public void get_search_handler() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container().search(true))))) {
SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler");
assertNotNull(searchHandler);
}
}
@Test
public void component() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component(MockSearcher.class))))) {
Component c = app.getComponentById(MockSearcher.class.getName());
assertNotNull(c);
}
}
@Test
public void component_with_config() throws Exception {
MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")));
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component("foo", MockDocproc.class, config))))) {
Component c = app.getComponentById("foo");
assertNotNull(c);
}
}
@Test
public void file_distribution() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) {
Application unused = application;
}
}
@Test
public void server() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.server("foo", MockServer.class)))
)) {
MockServer server = (MockServer) app.getServerById("foo");
assertNotNull(server);
assertTrue(server.isStarted());
}
}
@Test
public void query_profile() throws Exception {
try (Application app = Application.fromBuilder(new Application.Builder()
.queryProfile("default", "<query-profile id=\"default\">\n" +
"<field name=\"defaultage\">7d</field>\n" +
"</query-profile>")
.queryProfileType("type", "<query-profile-type id=\"type\">\n" +
"<field name=\"defaultage\" type=\"string\" />\n" +
"</query-profile-type>")
.rankExpression("re", "commonfirstphase(globalstaticrank)")
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.search(true)
))) {
Application unused = app;
}
}
@Test(expected = ConnectException.class)
public void http_interface_is_off_when_networking_is_disabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
int statusCode = client.execute(new HttpGet("http:
fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode);
Application unused = application;
}
}
@Test
public void http_interface_is_on_when_networking_is_enabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
HttpResponse response = client.execute(new HttpGet("http:
assertEquals(200, response.getStatusLine().getStatusCode());
BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = r.readLine()) != null) {
sb.append(line).append("\n");
}
assertTrue(sb.toString().contains("Handler"));
Application unused = application;
}
}
@Test
public void athenz_in_deployment_xml() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) {
Application unused = application;
}
}
private static int getFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
return socket.getLocalPort();
}
}
private static String servicesXmlWithServer(int port) {
return "<container version='1.0'>" +
" <http> <server port='" + port +"' id='foo'/> </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
@Test
public void application_with_access_control_can_be_constructed() {
try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) {
Application unused = application;
}
}
private static String servicesXmlWithAccessControl() {
return "<container version='1.0'>" +
" <http> <server port='" + 0 +"' id='foo'/> " +
" <filtering>" +
" <access-control domain='foo' />" +
" </filtering>" +
" </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
} | class ApplicationTest {
@Test
public void minimal_application_can_be_constructed() {
try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) {
Application unused = application;
}
}
/** Tests that an application with search chains referencing a content cluster can be constructed. */
@Test
private void printTrace(Result result) {
for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class))
System.out.println(message);
}
@Test
public void empty_container() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) {
try {
app.process(new DocumentRemove(null));
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.process(new Processing());
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.search(new Query("?foo"));
fail("expected exception");
} catch (Exception ignore) {
}
}
}
@Test
public void config() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.documentProcessor("docproc", "default", MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder()
.mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2"))
.mylist("item1")
.mylist("item2")
.mymap("key1", "value1")
.mymap("key2", "value2")
.mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1"))
.mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2")))))))
) {
MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default");
assertNotNull(docproc);
assertEquals(docproc.getConfig().mystruct().id(), "structid");
assertEquals(docproc.getConfig().mystruct().value(), "structvalue");
assertEquals(docproc.getConfig().mystructlist().size(), 2);
assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1");
assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1");
assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2");
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2");
assertEquals(docproc.getConfig().mylist().size(), 2);
assertEquals(docproc.getConfig().mylist().get(0), "item1");
assertEquals(docproc.getConfig().mylist().get(1), "item2");
assertEquals(docproc.getConfig().mymap().size(), 2);
assertTrue(docproc.getConfig().mymap().containsKey("key1"));
assertEquals(docproc.getConfig().mymap().get("key1"), "value1");
assertTrue(docproc.getConfig().mymap().containsKey("key2"));
assertEquals(docproc.getConfig().mymap().get("key2"), "value2");
assertEquals(docproc.getConfig().mymapstruct().size(), 2);
assertTrue(docproc.getConfig().mymapstruct().containsKey("key1"));
assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1");
assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1");
assertTrue(docproc.getConfig().mymapstruct().containsKey("key2"));
assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2");
assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2");
}
}
@Test
public void handler() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.handler("http:
) {
RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName());
assertNotNull(handler);
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
}
}
@Test
public void renderer() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.renderer("mock", MockRenderer.class))))
) {
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />");
}
}
@Test
public void search_default() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher(MockSearcher.class))))
) {
Result result = app.search(new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void search() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher("foo", MockSearcher.class))))
) {
Result result = app.search("foo", new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void document_type() throws Exception {
try (
Application app = Application.fromBuilder(new Application.Builder()
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.documentProcessor(MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))))))
) {
Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes();
assertNotNull(typeMap);
assertTrue(typeMap.containsKey("test"));
}
}
@Test
public void get_search_handler() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container().search(true))))) {
SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler");
assertNotNull(searchHandler);
}
}
@Test
public void component() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component(MockSearcher.class))))) {
Component c = app.getComponentById(MockSearcher.class.getName());
assertNotNull(c);
}
}
@Test
public void component_with_config() throws Exception {
MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")));
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component("foo", MockDocproc.class, config))))) {
Component c = app.getComponentById("foo");
assertNotNull(c);
}
}
@Test
public void file_distribution() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) {
Application unused = application;
}
}
@Test
public void server() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.server("foo", MockServer.class)))
)) {
MockServer server = (MockServer) app.getServerById("foo");
assertNotNull(server);
assertTrue(server.isStarted());
}
}
@Test
public void query_profile() throws Exception {
try (Application app = Application.fromBuilder(new Application.Builder()
.queryProfile("default", "<query-profile id=\"default\">\n" +
"<field name=\"defaultage\">7d</field>\n" +
"</query-profile>")
.queryProfileType("type", "<query-profile-type id=\"type\">\n" +
"<field name=\"defaultage\" type=\"string\" />\n" +
"</query-profile-type>")
.rankExpression("re", "commonfirstphase(globalstaticrank)")
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.search(true)
))) {
Application unused = app;
}
}
@Test(expected = ConnectException.class)
public void http_interface_is_off_when_networking_is_disabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
int statusCode = client.execute(new HttpGet("http:
fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode);
Application unused = application;
}
}
@Test
public void http_interface_is_on_when_networking_is_enabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
HttpResponse response = client.execute(new HttpGet("http:
assertEquals(200, response.getStatusLine().getStatusCode());
BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = r.readLine()) != null) {
sb.append(line).append("\n");
}
assertTrue(sb.toString().contains("Handler"));
Application unused = application;
}
}
@Test
public void athenz_in_deployment_xml() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) {
Application unused = application;
}
}
private static int getFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
return socket.getLocalPort();
}
}
private static String servicesXmlWithServer(int port) {
return "<container version='1.0'>" +
" <http> <server port='" + port +"' id='foo'/> </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
@Test
public void application_with_access_control_can_be_constructed() {
try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) {
Application unused = application;
}
}
private static String servicesXmlWithAccessControl() {
return "<container version='1.0'>" +
" <http> <server port='" + 0 +"' id='foo'/> " +
" <filtering>" +
" <access-control domain='foo' />" +
" </filtering>" +
" </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
} |
Because this is an n-gram index (for some reason) so an AND is explicitly produced. | public void container_and_referenced_content() {
try (Application application =
Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) {
Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"),
new Query("?query=substring:foobar&timeout=20000"));
assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)",
result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString());
}
} | assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", | public void container_and_referenced_content() {
try (Application application =
Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) {
Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"),
new Query("?query=substring:foobar&timeout=20000"));
assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)",
result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString());
}
} | class ApplicationTest {
@Test
public void minimal_application_can_be_constructed() {
try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) {
Application unused = application;
}
}
/** Tests that an application with search chains referencing a content cluster can be constructed. */
@Test
private void printTrace(Result result) {
for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class))
System.out.println(message);
}
@Test
public void empty_container() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) {
try {
app.process(new DocumentRemove(null));
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.process(new Processing());
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.search(new Query("?foo"));
fail("expected exception");
} catch (Exception ignore) {
}
}
}
@Test
public void config() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.documentProcessor("docproc", "default", MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder()
.mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2"))
.mylist("item1")
.mylist("item2")
.mymap("key1", "value1")
.mymap("key2", "value2")
.mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1"))
.mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2")))))))
) {
MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default");
assertNotNull(docproc);
assertEquals(docproc.getConfig().mystruct().id(), "structid");
assertEquals(docproc.getConfig().mystruct().value(), "structvalue");
assertEquals(docproc.getConfig().mystructlist().size(), 2);
assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1");
assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1");
assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2");
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2");
assertEquals(docproc.getConfig().mylist().size(), 2);
assertEquals(docproc.getConfig().mylist().get(0), "item1");
assertEquals(docproc.getConfig().mylist().get(1), "item2");
assertEquals(docproc.getConfig().mymap().size(), 2);
assertTrue(docproc.getConfig().mymap().containsKey("key1"));
assertEquals(docproc.getConfig().mymap().get("key1"), "value1");
assertTrue(docproc.getConfig().mymap().containsKey("key2"));
assertEquals(docproc.getConfig().mymap().get("key2"), "value2");
assertEquals(docproc.getConfig().mymapstruct().size(), 2);
assertTrue(docproc.getConfig().mymapstruct().containsKey("key1"));
assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1");
assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1");
assertTrue(docproc.getConfig().mymapstruct().containsKey("key2"));
assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2");
assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2");
}
}
@Test
public void handler() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.handler("http:
) {
RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName());
assertNotNull(handler);
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
}
}
@Test
public void renderer() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.renderer("mock", MockRenderer.class))))
) {
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />");
}
}
@Test
public void search_default() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher(MockSearcher.class))))
) {
Result result = app.search(new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void search() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher("foo", MockSearcher.class))))
) {
Result result = app.search("foo", new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void document_type() throws Exception {
try (
Application app = Application.fromBuilder(new Application.Builder()
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.documentProcessor(MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))))))
) {
Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes();
assertNotNull(typeMap);
assertTrue(typeMap.containsKey("test"));
}
}
@Test
public void get_search_handler() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container().search(true))))) {
SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler");
assertNotNull(searchHandler);
}
}
@Test
public void component() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component(MockSearcher.class))))) {
Component c = app.getComponentById(MockSearcher.class.getName());
assertNotNull(c);
}
}
@Test
public void component_with_config() throws Exception {
MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")));
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component("foo", MockDocproc.class, config))))) {
Component c = app.getComponentById("foo");
assertNotNull(c);
}
}
@Test
public void file_distribution() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) {
Application unused = application;
}
}
@Test
public void server() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.server("foo", MockServer.class)))
)) {
MockServer server = (MockServer) app.getServerById("foo");
assertNotNull(server);
assertTrue(server.isStarted());
}
}
@Test
public void query_profile() throws Exception {
try (Application app = Application.fromBuilder(new Application.Builder()
.queryProfile("default", "<query-profile id=\"default\">\n" +
"<field name=\"defaultage\">7d</field>\n" +
"</query-profile>")
.queryProfileType("type", "<query-profile-type id=\"type\">\n" +
"<field name=\"defaultage\" type=\"string\" />\n" +
"</query-profile-type>")
.rankExpression("re", "commonfirstphase(globalstaticrank)")
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.search(true)
))) {
Application unused = app;
}
}
@Test(expected = ConnectException.class)
public void http_interface_is_off_when_networking_is_disabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
int statusCode = client.execute(new HttpGet("http:
fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode);
Application unused = application;
}
}
@Test
public void http_interface_is_on_when_networking_is_enabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
HttpResponse response = client.execute(new HttpGet("http:
assertEquals(200, response.getStatusLine().getStatusCode());
BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = r.readLine()) != null) {
sb.append(line).append("\n");
}
assertTrue(sb.toString().contains("Handler"));
Application unused = application;
}
}
@Test
public void athenz_in_deployment_xml() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) {
Application unused = application;
}
}
private static int getFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
return socket.getLocalPort();
}
}
private static String servicesXmlWithServer(int port) {
return "<container version='1.0'>" +
" <http> <server port='" + port +"' id='foo'/> </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
@Test
public void application_with_access_control_can_be_constructed() {
try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) {
Application unused = application;
}
}
private static String servicesXmlWithAccessControl() {
return "<container version='1.0'>" +
" <http> <server port='" + 0 +"' id='foo'/> " +
" <filtering>" +
" <access-control domain='foo' />" +
" </filtering>" +
" </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
} | class ApplicationTest {
@Test
public void minimal_application_can_be_constructed() {
try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) {
Application unused = application;
}
}
/** Tests that an application with search chains referencing a content cluster can be constructed. */
@Test
private void printTrace(Result result) {
for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class))
System.out.println(message);
}
@Test
public void empty_container() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) {
try {
app.process(new DocumentRemove(null));
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.process(new Processing());
fail("expected exception");
} catch (Exception ignore) {
}
try {
app.search(new Query("?foo"));
fail("expected exception");
} catch (Exception ignore) {
}
}
}
@Test
public void config() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.documentProcessor("docproc", "default", MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder()
.mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1"))
.mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2"))
.mylist("item1")
.mylist("item2")
.mymap("key1", "value1")
.mymap("key2", "value2")
.mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1"))
.mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2")))))))
) {
MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default");
assertNotNull(docproc);
assertEquals(docproc.getConfig().mystruct().id(), "structid");
assertEquals(docproc.getConfig().mystruct().value(), "structvalue");
assertEquals(docproc.getConfig().mystructlist().size(), 2);
assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1");
assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1");
assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2");
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2");
assertEquals(docproc.getConfig().mylist().size(), 2);
assertEquals(docproc.getConfig().mylist().get(0), "item1");
assertEquals(docproc.getConfig().mylist().get(1), "item2");
assertEquals(docproc.getConfig().mymap().size(), 2);
assertTrue(docproc.getConfig().mymap().containsKey("key1"));
assertEquals(docproc.getConfig().mymap().get("key1"), "value1");
assertTrue(docproc.getConfig().mymap().containsKey("key2"));
assertEquals(docproc.getConfig().mymap().get("key2"), "value2");
assertEquals(docproc.getConfig().mymapstruct().size(), 2);
assertTrue(docproc.getConfig().mymapstruct().containsKey("key1"));
assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1");
assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1");
assertTrue(docproc.getConfig().mymapstruct().containsKey("key2"));
assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2");
assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2");
}
}
@Test
public void handler() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.handler("http:
) {
RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName());
assertNotNull(handler);
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
request = new Request("http:
response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "OK");
}
}
@Test
public void renderer() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.renderer("mock", MockRenderer.class))))
) {
Request request = new Request("http:
Response response = app.handleRequest(request);
assertNotNull(response);
assertEquals(response.getStatus(), 200);
assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />");
}
}
@Test
public void search_default() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher(MockSearcher.class))))
) {
Result result = app.search(new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void search() throws Exception {
try (
ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.searcher("foo", MockSearcher.class))))
) {
Result result = app.search("foo", new Query("?query=foo&timeout=20000"));
assertEquals(1, result.hits().size());
}
}
@Test
public void document_type() throws Exception {
try (
Application app = Application.fromBuilder(new Application.Builder()
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.documentProcessor(MockDocproc.class)
.config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))))))
) {
Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes();
assertNotNull(typeMap);
assertTrue(typeMap.containsKey("test"));
}
}
@Test
public void get_search_handler() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container().search(true))))) {
SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler");
assertNotNull(searchHandler);
}
}
@Test
public void component() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component(MockSearcher.class))))) {
Component c = app.getComponentById(MockSearcher.class.getName());
assertNotNull(c);
}
}
@Test
public void component_with_config() throws Exception {
MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")));
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.component("foo", MockDocproc.class, config))))) {
Component c = app.getComponentById("foo");
assertNotNull(c);
}
}
@Test
public void file_distribution() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) {
Application unused = application;
}
}
@Test
public void server() throws Exception {
try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
.server("foo", MockServer.class)))
)) {
MockServer server = (MockServer) app.getServerById("foo");
assertNotNull(server);
assertTrue(server.isStarted());
}
}
@Test
public void query_profile() throws Exception {
try (Application app = Application.fromBuilder(new Application.Builder()
.queryProfile("default", "<query-profile id=\"default\">\n" +
"<field name=\"defaultage\">7d</field>\n" +
"</query-profile>")
.queryProfileType("type", "<query-profile-type id=\"type\">\n" +
"<field name=\"defaultage\" type=\"string\" />\n" +
"</query-profile-type>")
.rankExpression("re", "commonfirstphase(globalstaticrank)")
.documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8))
.container("default", new Application.Builder.Container()
.search(true)
))) {
Application unused = app;
}
}
@Test(expected = ConnectException.class)
public void http_interface_is_off_when_networking_is_disabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
int statusCode = client.execute(new HttpGet("http:
fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode);
Application unused = application;
}
}
@Test
public void http_interface_is_on_when_networking_is_enabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) {
HttpClient client = new org.apache.http.impl.client.DefaultHttpClient();
HttpResponse response = client.execute(new HttpGet("http:
assertEquals(200, response.getStatusLine().getStatusCode());
BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = r.readLine()) != null) {
sb.append(line).append("\n");
}
assertTrue(sb.toString().contains("Handler"));
Application unused = application;
}
}
@Test
public void athenz_in_deployment_xml() {
try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) {
Application unused = application;
}
}
private static int getFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
return socket.getLocalPort();
}
}
private static String servicesXmlWithServer(int port) {
return "<container version='1.0'>" +
" <http> <server port='" + port +"' id='foo'/> </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
@Test
public void application_with_access_control_can_be_constructed() {
try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) {
Application unused = application;
}
}
private static String servicesXmlWithAccessControl() {
return "<container version='1.0'>" +
" <http> <server port='" + 0 +"' id='foo'/> " +
" <filtering>" +
" <access-control domain='foo' />" +
" </filtering>" +
" </http>" +
" <accesslog type=\"disabled\" />" +
"</container>";
}
} |
I don't quite understand; IMHO, both implementations will push the first `limit` unfinished tasks into `res`. The difference is that the previous one would iterate over all elements even after the size of `res` had reached `limit`. | public List<AgentTask> getUnfinishedTasks(int limit) {
List<AgentTask> res = Lists.newArrayList();
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
if (!agentTask.isFinished()) {
res.add(agentTask);
if (res.size() >= limit) {
return res;
}
}
}
}
return res;
} | return res; | public List<AgentTask> getUnfinishedTasks(int limit) {
List<AgentTask> res = Lists.newArrayList();
if (limit == 0) {
return res;
}
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
if (!agentTask.isFinished()) {
res.add(agentTask);
if (res.size() >= limit) {
return res;
}
}
}
}
return res;
} | class AgentBatchTask implements Runnable {
private static final Logger LOG = LogManager.getLogger(AgentBatchTask.class);
private final Map<Long, List<AgentTask>> backendIdToTasks;
public AgentBatchTask() {
this.backendIdToTasks = new HashMap<>();
}
public AgentBatchTask(AgentTask singleTask) {
this();
addTask(singleTask);
}
public void addTask(AgentTask agentTask) {
if (agentTask == null) {
return;
}
long backendId = agentTask.getBackendId();
List<AgentTask> tasks = backendIdToTasks.computeIfAbsent(backendId, k -> new ArrayList<>());
tasks.add(agentTask);
}
public List<AgentTask> getAllTasks() {
List<AgentTask> tasks = new ArrayList<>(getTaskNum());
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
tasks.addAll(entry.getValue());
}
return tasks;
}
public int getTaskNum() {
int num = 0;
for (Map.Entry<Long, List<AgentTask>> entry : backendIdToTasks.entrySet()) {
num += entry.getValue().size();
}
return num;
}
public boolean isFinished() {
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
if (!agentTask.isFinished()) {
return false;
}
}
}
return true;
}
public int getFinishedTaskNum() {
int count = 0;
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
count += agentTask.isFinished() ? 1 : 0;
}
}
return count;
}
@Override
public void run() {
for (Long backendId : this.backendIdToTasks.keySet()) {
BackendService.Client client = null;
TNetworkAddress address = null;
boolean ok = false;
try {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId);
if (backend == null || !backend.isAlive()) {
continue;
}
List<AgentTask> tasks = this.backendIdToTasks.get(backendId);
address = new TNetworkAddress(backend.getHost(), backend.getBePort());
client = ClientPool.backendPool.borrowObject(address);
List<TAgentTaskRequest> agentTaskRequests = new LinkedList<TAgentTaskRequest>();
for (AgentTask task : tasks) {
agentTaskRequests.add(toAgentTaskRequest(task));
}
client.submit_tasks(agentTaskRequests);
if (LOG.isDebugEnabled()) {
for (AgentTask task : tasks) {
LOG.debug("send task: type[{}], backend[{}], signature[{}]",
task.getTaskType(), backendId, task.getSignature());
}
}
ok = true;
} catch (Exception e) {
LOG.warn("task exec error. backend[{}]", backendId, e);
} finally {
if (ok) {
ClientPool.backendPool.returnObject(address, client);
} else {
ClientPool.backendPool.invalidateObject(address, client);
}
}
}
}
private TAgentTaskRequest toAgentTaskRequest(AgentTask task) {
TAgentTaskRequest tAgentTaskRequest = new TAgentTaskRequest();
tAgentTaskRequest.setProtocol_version(TAgentServiceVersion.V1);
tAgentTaskRequest.setSignature(task.getSignature());
TTaskType taskType = task.getTaskType();
tAgentTaskRequest.setTask_type(taskType);
switch (taskType) {
case CREATE: {
CreateReplicaTask createReplicaTask = (CreateReplicaTask) task;
TCreateTabletReq request = createReplicaTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setCreate_tablet_req(request);
return tAgentTaskRequest;
}
case DROP: {
DropReplicaTask dropReplicaTask = (DropReplicaTask) task;
TDropTabletReq request = dropReplicaTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setDrop_tablet_req(request);
return tAgentTaskRequest;
}
case REALTIME_PUSH:
case PUSH: {
PushTask pushTask = (PushTask) task;
TPushReq request = pushTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setPush_req(request);
tAgentTaskRequest.setPriority(pushTask.getPriority());
return tAgentTaskRequest;
}
case CLONE: {
CloneTask cloneTask = (CloneTask) task;
TCloneReq request = cloneTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClone_req(request);
return tAgentTaskRequest;
}
case ROLLUP: {
CreateRollupTask rollupTask = (CreateRollupTask) task;
TAlterTabletReq request = rollupTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req(request);
tAgentTaskRequest.setResource_info(rollupTask.getResourceInfo());
return tAgentTaskRequest;
}
case SCHEMA_CHANGE: {
SchemaChangeTask schemaChangeTask = (SchemaChangeTask) task;
TAlterTabletReq request = schemaChangeTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req(request);
tAgentTaskRequest.setResource_info(schemaChangeTask.getResourceInfo());
return tAgentTaskRequest;
}
case STORAGE_MEDIUM_MIGRATE: {
StorageMediaMigrationTask migrationTask = (StorageMediaMigrationTask) task;
TStorageMediumMigrateReq request = migrationTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setStorage_medium_migrate_req(request);
return tAgentTaskRequest;
}
case CHECK_CONSISTENCY: {
CheckConsistencyTask checkConsistencyTask = (CheckConsistencyTask) task;
TCheckConsistencyReq request = checkConsistencyTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setCheck_consistency_req(request);
return tAgentTaskRequest;
}
case MAKE_SNAPSHOT: {
SnapshotTask snapshotTask = (SnapshotTask) task;
TSnapshotRequest request = snapshotTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setSnapshot_req(request);
return tAgentTaskRequest;
}
case RELEASE_SNAPSHOT: {
ReleaseSnapshotTask releaseSnapshotTask = (ReleaseSnapshotTask) task;
TReleaseSnapshotRequest request = releaseSnapshotTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setRelease_snapshot_req(request);
return tAgentTaskRequest;
}
case UPLOAD: {
UploadTask uploadTask = (UploadTask) task;
TUploadReq request = uploadTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setUpload_req(request);
return tAgentTaskRequest;
}
case DOWNLOAD: {
DownloadTask downloadTask = (DownloadTask) task;
TDownloadReq request = downloadTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setDownload_req(request);
return tAgentTaskRequest;
}
case PUBLISH_VERSION: {
PublishVersionTask publishVersionTask = (PublishVersionTask) task;
TPublishVersionRequest request = publishVersionTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setPublish_version_req(request);
return tAgentTaskRequest;
}
case CLEAR_ALTER_TASK: {
ClearAlterTask clearAlterTask = (ClearAlterTask) task;
TClearAlterTaskRequest request = clearAlterTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClear_alter_task_req(request);
return tAgentTaskRequest;
}
case CLEAR_TRANSACTION_TASK: {
ClearTransactionTask clearTransactionTask = (ClearTransactionTask) task;
TClearTransactionTaskRequest request = clearTransactionTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClear_transaction_task_req(request);
return tAgentTaskRequest;
}
case MOVE: {
DirMoveTask dirMoveTask = (DirMoveTask) task;
TMoveDirReq request = dirMoveTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setMove_dir_req(request);
return tAgentTaskRequest;
}
case UPDATE_TABLET_META_INFO: {
UpdateTabletMetaInfoTask updateTabletMetaInfoTask = (UpdateTabletMetaInfoTask) task;
TUpdateTabletMetaInfoReq request = updateTabletMetaInfoTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setUpdate_tablet_meta_info_req(request);
return tAgentTaskRequest;
}
case ALTER: {
AlterReplicaTask createRollupTask = (AlterReplicaTask) task;
TAlterTabletReqV2 request = createRollupTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req_v2(request);
return tAgentTaskRequest;
}
default:
LOG.debug("could not find task type for task [{}]", task);
return null;
}
}
} | class AgentBatchTask implements Runnable {
private static final Logger LOG = LogManager.getLogger(AgentBatchTask.class);
private final Map<Long, List<AgentTask>> backendIdToTasks;
public AgentBatchTask() {
this.backendIdToTasks = new HashMap<>();
}
public AgentBatchTask(AgentTask singleTask) {
this();
addTask(singleTask);
}
public void addTask(AgentTask agentTask) {
if (agentTask == null) {
return;
}
long backendId = agentTask.getBackendId();
List<AgentTask> tasks = backendIdToTasks.computeIfAbsent(backendId, k -> new ArrayList<>());
tasks.add(agentTask);
}
public List<AgentTask> getAllTasks() {
List<AgentTask> tasks = new ArrayList<>(getTaskNum());
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
tasks.addAll(entry.getValue());
}
return tasks;
}
public int getTaskNum() {
int num = 0;
for (Map.Entry<Long, List<AgentTask>> entry : backendIdToTasks.entrySet()) {
num += entry.getValue().size();
}
return num;
}
public boolean isFinished() {
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
if (!agentTask.isFinished()) {
return false;
}
}
}
return true;
}
public int getFinishedTaskNum() {
int count = 0;
for (Map.Entry<Long, List<AgentTask>> entry : this.backendIdToTasks.entrySet()) {
for (AgentTask agentTask : entry.getValue()) {
count += agentTask.isFinished() ? 1 : 0;
}
}
return count;
}
@Override
public void run() {
for (Long backendId : this.backendIdToTasks.keySet()) {
BackendService.Client client = null;
TNetworkAddress address = null;
boolean ok = false;
try {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId);
if (backend == null || !backend.isAlive()) {
continue;
}
List<AgentTask> tasks = this.backendIdToTasks.get(backendId);
address = new TNetworkAddress(backend.getHost(), backend.getBePort());
client = ClientPool.backendPool.borrowObject(address);
List<TAgentTaskRequest> agentTaskRequests = new LinkedList<TAgentTaskRequest>();
for (AgentTask task : tasks) {
agentTaskRequests.add(toAgentTaskRequest(task));
}
client.submit_tasks(agentTaskRequests);
if (LOG.isDebugEnabled()) {
for (AgentTask task : tasks) {
LOG.debug("send task: type[{}], backend[{}], signature[{}]",
task.getTaskType(), backendId, task.getSignature());
}
}
ok = true;
} catch (Exception e) {
LOG.warn("task exec error. backend[{}]", backendId, e);
} finally {
if (ok) {
ClientPool.backendPool.returnObject(address, client);
} else {
ClientPool.backendPool.invalidateObject(address, client);
}
}
}
}
private TAgentTaskRequest toAgentTaskRequest(AgentTask task) {
TAgentTaskRequest tAgentTaskRequest = new TAgentTaskRequest();
tAgentTaskRequest.setProtocol_version(TAgentServiceVersion.V1);
tAgentTaskRequest.setSignature(task.getSignature());
TTaskType taskType = task.getTaskType();
tAgentTaskRequest.setTask_type(taskType);
switch (taskType) {
case CREATE: {
CreateReplicaTask createReplicaTask = (CreateReplicaTask) task;
TCreateTabletReq request = createReplicaTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setCreate_tablet_req(request);
return tAgentTaskRequest;
}
case DROP: {
DropReplicaTask dropReplicaTask = (DropReplicaTask) task;
TDropTabletReq request = dropReplicaTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setDrop_tablet_req(request);
return tAgentTaskRequest;
}
case REALTIME_PUSH:
case PUSH: {
PushTask pushTask = (PushTask) task;
TPushReq request = pushTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setPush_req(request);
tAgentTaskRequest.setPriority(pushTask.getPriority());
return tAgentTaskRequest;
}
case CLONE: {
CloneTask cloneTask = (CloneTask) task;
TCloneReq request = cloneTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClone_req(request);
return tAgentTaskRequest;
}
case ROLLUP: {
CreateRollupTask rollupTask = (CreateRollupTask) task;
TAlterTabletReq request = rollupTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req(request);
tAgentTaskRequest.setResource_info(rollupTask.getResourceInfo());
return tAgentTaskRequest;
}
case SCHEMA_CHANGE: {
SchemaChangeTask schemaChangeTask = (SchemaChangeTask) task;
TAlterTabletReq request = schemaChangeTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req(request);
tAgentTaskRequest.setResource_info(schemaChangeTask.getResourceInfo());
return tAgentTaskRequest;
}
case STORAGE_MEDIUM_MIGRATE: {
StorageMediaMigrationTask migrationTask = (StorageMediaMigrationTask) task;
TStorageMediumMigrateReq request = migrationTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setStorage_medium_migrate_req(request);
return tAgentTaskRequest;
}
case CHECK_CONSISTENCY: {
CheckConsistencyTask checkConsistencyTask = (CheckConsistencyTask) task;
TCheckConsistencyReq request = checkConsistencyTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setCheck_consistency_req(request);
return tAgentTaskRequest;
}
case MAKE_SNAPSHOT: {
SnapshotTask snapshotTask = (SnapshotTask) task;
TSnapshotRequest request = snapshotTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setSnapshot_req(request);
return tAgentTaskRequest;
}
case RELEASE_SNAPSHOT: {
ReleaseSnapshotTask releaseSnapshotTask = (ReleaseSnapshotTask) task;
TReleaseSnapshotRequest request = releaseSnapshotTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setRelease_snapshot_req(request);
return tAgentTaskRequest;
}
case UPLOAD: {
UploadTask uploadTask = (UploadTask) task;
TUploadReq request = uploadTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setUpload_req(request);
return tAgentTaskRequest;
}
case DOWNLOAD: {
DownloadTask downloadTask = (DownloadTask) task;
TDownloadReq request = downloadTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setDownload_req(request);
return tAgentTaskRequest;
}
case PUBLISH_VERSION: {
PublishVersionTask publishVersionTask = (PublishVersionTask) task;
TPublishVersionRequest request = publishVersionTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setPublish_version_req(request);
return tAgentTaskRequest;
}
case CLEAR_ALTER_TASK: {
ClearAlterTask clearAlterTask = (ClearAlterTask) task;
TClearAlterTaskRequest request = clearAlterTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClear_alter_task_req(request);
return tAgentTaskRequest;
}
case CLEAR_TRANSACTION_TASK: {
ClearTransactionTask clearTransactionTask = (ClearTransactionTask) task;
TClearTransactionTaskRequest request = clearTransactionTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setClear_transaction_task_req(request);
return tAgentTaskRequest;
}
case MOVE: {
DirMoveTask dirMoveTask = (DirMoveTask) task;
TMoveDirReq request = dirMoveTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setMove_dir_req(request);
return tAgentTaskRequest;
}
case UPDATE_TABLET_META_INFO: {
UpdateTabletMetaInfoTask updateTabletMetaInfoTask = (UpdateTabletMetaInfoTask) task;
TUpdateTabletMetaInfoReq request = updateTabletMetaInfoTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setUpdate_tablet_meta_info_req(request);
return tAgentTaskRequest;
}
case ALTER: {
AlterReplicaTask createRollupTask = (AlterReplicaTask) task;
TAlterTabletReqV2 request = createRollupTask.toThrift();
if (LOG.isDebugEnabled()) {
LOG.debug(request.toString());
}
tAgentTaskRequest.setAlter_tablet_req_v2(request);
return tAgentTaskRequest;
}
default:
LOG.debug("could not find task type for task [{}]", task);
return null;
}
}
} |
Is Exceptions.toMessageString preferred over chaining the exceptions? Don't you lose the initial exception's call stack? | private void autoscale(ApplicationId application, NodeList applicationNodes) {
try {
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Illegal arguments for " + application + ": " + Exceptions.toMessageString(e));
}
} | throw new IllegalArgumentException("Illegal arguments for " + application + ": " + Exceptions.toMessageString(e)); | private void autoscale(ApplicationId application, NodeList applicationNodes) {
try {
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Illegal arguments for " + application, e);
}
} | class AutoscalingMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
private final Metric metric;
public AutoscalingMaintainer(NodeRepository nodeRepository,
Deployer deployer,
Metric metric,
Duration interval) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
this.deployer = deployer;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return 1.0;
activeNodesByApplication().forEach(this::autoscale);
return 1.0;
}
private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes) {
Optional<Application> application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
Optional<Cluster> cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
Cluster updatedCluster = updateCompletion(cluster.get(), clusterNodes);
var advice = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
if (advice.isPresent() && !cluster.get().targetResources().equals(advice.target()) ||
(updatedCluster != cluster.get() || !advice.reason().equals(cluster.get().autoscalingStatus()))) {
try (var lock = nodeRepository().nodes().lock(applicationId)) {
application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
updatedCluster = updateCompletion(cluster.get(), clusterNodes)
.with(advice.reason())
.withTarget(advice.target());
applications().put(application.get().with(updatedCluster), lock);
if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) {
ClusterResources before = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources();
try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
if (deployment.isValid()) {
deployment.activate();
logAutoscaling(before, advice.target().get(), applicationId, clusterNodes);
}
}
}
}
}
}
private Applications applications() {
return nodeRepository().applications();
}
/** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) {
if (cluster.lastScalingEvent().isEmpty()) return cluster;
var event = cluster.lastScalingEvent().get();
if (event.completion().isPresent()) return cluster;
if (clusterNodes.retired().stream()
.anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
return cluster;
for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
Optional<NodeMetricSnapshot> onNewGeneration =
nodeTimeseries.asList().stream()
.filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
if (onNewGeneration.isEmpty()) return cluster;
}
Instant completionTime = nodeRepository().clock().instant();
return cluster.with(event.withCompletion(completionTime));
}
private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) {
log.info("Autoscaled " + application + " " + clusterNodes.clusterSpec() + ":" +
"\nfrom " + toString(from) + "\nto " + toString(to));
}
static String toString(ClusterResources r) {
return r + " (total: " + r.totalResources() + ")";
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} | class AutoscalingMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
private final Metric metric;
public AutoscalingMaintainer(NodeRepository nodeRepository,
Deployer deployer,
Metric metric,
Duration interval) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
this.deployer = deployer;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return 1.0;
activeNodesByApplication().forEach(this::autoscale);
return 1.0;
}
private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes) {
Optional<Application> application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
Optional<Cluster> cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
Cluster updatedCluster = updateCompletion(cluster.get(), clusterNodes);
var advice = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
if (advice.isPresent() && !cluster.get().targetResources().equals(advice.target()) ||
(updatedCluster != cluster.get() || !advice.reason().equals(cluster.get().autoscalingStatus()))) {
try (var lock = nodeRepository().nodes().lock(applicationId)) {
application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
updatedCluster = updateCompletion(cluster.get(), clusterNodes)
.with(advice.reason())
.withTarget(advice.target());
applications().put(application.get().with(updatedCluster), lock);
if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) {
ClusterResources before = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources();
try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
if (deployment.isValid()) {
deployment.activate();
logAutoscaling(before, advice.target().get(), applicationId, clusterNodes);
}
}
}
}
}
}
private Applications applications() {
return nodeRepository().applications();
}
/** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) {
if (cluster.lastScalingEvent().isEmpty()) return cluster;
var event = cluster.lastScalingEvent().get();
if (event.completion().isPresent()) return cluster;
if (clusterNodes.retired().stream()
.anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
return cluster;
for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
Optional<NodeMetricSnapshot> onNewGeneration =
nodeTimeseries.asList().stream()
.filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
if (onNewGeneration.isEmpty()) return cluster;
}
Instant completionTime = nodeRepository().clock().instant();
return cluster.with(event.withCompletion(completionTime));
}
private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) {
log.info("Autoscaled " + application + " " + clusterNodes.clusterSpec() + ":" +
"\nfrom " + toString(from) + "\nto " + toString(to));
}
static String toString(ClusterResources r) {
return r + " (total: " + r.totalResources() + ")";
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} |
Probably better to chain exceptions, yes, fixed. | private void autoscale(ApplicationId application, NodeList applicationNodes) {
try {
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Illegal arguments for " + application + ": " + Exceptions.toMessageString(e));
}
} | throw new IllegalArgumentException("Illegal arguments for " + application + ": " + Exceptions.toMessageString(e)); | private void autoscale(ApplicationId application, NodeList applicationNodes) {
try {
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Illegal arguments for " + application, e);
}
} | class AutoscalingMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
private final Metric metric;
public AutoscalingMaintainer(NodeRepository nodeRepository,
Deployer deployer,
Metric metric,
Duration interval) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
this.deployer = deployer;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return 1.0;
activeNodesByApplication().forEach(this::autoscale);
return 1.0;
}
private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes) {
Optional<Application> application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
Optional<Cluster> cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
Cluster updatedCluster = updateCompletion(cluster.get(), clusterNodes);
var advice = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
if (advice.isPresent() && !cluster.get().targetResources().equals(advice.target()) ||
(updatedCluster != cluster.get() || !advice.reason().equals(cluster.get().autoscalingStatus()))) {
try (var lock = nodeRepository().nodes().lock(applicationId)) {
application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
updatedCluster = updateCompletion(cluster.get(), clusterNodes)
.with(advice.reason())
.withTarget(advice.target());
applications().put(application.get().with(updatedCluster), lock);
if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) {
ClusterResources before = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources();
try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
if (deployment.isValid()) {
deployment.activate();
logAutoscaling(before, advice.target().get(), applicationId, clusterNodes);
}
}
}
}
}
}
private Applications applications() {
return nodeRepository().applications();
}
/** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) {
if (cluster.lastScalingEvent().isEmpty()) return cluster;
var event = cluster.lastScalingEvent().get();
if (event.completion().isPresent()) return cluster;
if (clusterNodes.retired().stream()
.anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
return cluster;
for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
Optional<NodeMetricSnapshot> onNewGeneration =
nodeTimeseries.asList().stream()
.filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
if (onNewGeneration.isEmpty()) return cluster;
}
Instant completionTime = nodeRepository().clock().instant();
return cluster.with(event.withCompletion(completionTime));
}
private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) {
log.info("Autoscaled " + application + " " + clusterNodes.clusterSpec() + ":" +
"\nfrom " + toString(from) + "\nto " + toString(to));
}
static String toString(ClusterResources r) {
return r + " (total: " + r.totalResources() + ")";
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} | class AutoscalingMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
private final Metric metric;
public AutoscalingMaintainer(NodeRepository nodeRepository,
Deployer deployer,
Metric metric,
Duration interval) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
this.deployer = deployer;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return 1.0;
activeNodesByApplication().forEach(this::autoscale);
return 1.0;
}
private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes) {
Optional<Application> application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
Optional<Cluster> cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
Cluster updatedCluster = updateCompletion(cluster.get(), clusterNodes);
var advice = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
if (advice.isPresent() && !cluster.get().targetResources().equals(advice.target()) ||
(updatedCluster != cluster.get() || !advice.reason().equals(cluster.get().autoscalingStatus()))) {
try (var lock = nodeRepository().nodes().lock(applicationId)) {
application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return;
cluster = application.get().cluster(clusterId);
if (cluster.isEmpty()) return;
updatedCluster = updateCompletion(cluster.get(), clusterNodes)
.with(advice.reason())
.withTarget(advice.target());
applications().put(application.get().with(updatedCluster), lock);
if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) {
ClusterResources before = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources();
try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
if (deployment.isValid()) {
deployment.activate();
logAutoscaling(before, advice.target().get(), applicationId, clusterNodes);
}
}
}
}
}
}
private Applications applications() {
return nodeRepository().applications();
}
/** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) {
if (cluster.lastScalingEvent().isEmpty()) return cluster;
var event = cluster.lastScalingEvent().get();
if (event.completion().isPresent()) return cluster;
if (clusterNodes.retired().stream()
.anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
return cluster;
for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
Optional<NodeMetricSnapshot> onNewGeneration =
nodeTimeseries.asList().stream()
.filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
if (onNewGeneration.isEmpty()) return cluster;
}
Instant completionTime = nodeRepository().clock().instant();
return cluster.with(event.withCompletion(completionTime));
}
private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) {
log.info("Autoscaled " + application + " " + clusterNodes.clusterSpec() + ":" +
"\nfrom " + toString(from) + "\nto " + toString(to));
}
static String toString(ClusterResources r) {
return r + " (total: " + r.totalResources() + ")";
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} |
A comment seems appropriate here. 🙂 | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | if (statusCodeGroup == 2 || response.getStatus() == 412) | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} |
agreed | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | if (statusCodeGroup == 2 || response.getStatus() == 412) | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} |
https://github.com/vespa-engine/vespa/pull/23120 | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | if (statusCodeGroup == 2 || response.getStatus() == 412) | public ContentChannel handleResponse(Response response) {
var statusCodeGroup = response.getStatus() / 100;
if (statusCodeGroup == 2 || response.getStatus() == 412)
metrics.reportSuccessful(type, start);
else if (statusCodeGroup == 4)
metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR);
else if (statusCodeGroup == 5)
metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR);
return delegate.handleResponse(response);
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} | class MeasuringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
private final com.yahoo.documentapi.metrics.DocumentOperationType type;
private final Instant start;
private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) {
this.delegate = delegate;
this.type = type;
this.start = start;
}
@Override
} |
I guess we should go over and use nanoTime instead at some point. | protected Object sendSearchRequest(Query query, Object incomingContext) {
this.query = query;
Client.NodeConnection nodeConnection = resourcePool.getConnection(node.key());
if (nodeConnection == null) {
responses.add(Client.ResponseOrError.fromError("Could not send search to unknown node " + node.key()));
responseAvailable();
return incomingContext;
}
query.trace(false, 5, "Sending search request with jrt/protobuf to node with dist key ", node.key());
var timeout = TimeoutHelper.calculateTimeout(query);
if (timeout.timedOut()) {
responses.add(Client.ResponseOrError.fromTimeoutError("Timeout while waiting for " + getName()));
responseAvailable();
return incomingContext;
}
RpcContext context = getContext(incomingContext, timeout.request());
nodeConnection.request(RPC_METHOD,
context.compressedPayload.type(),
context.compressedPayload.uncompressedSize(),
context.compressedPayload.data(),
this,
timeout.client());
return context;
} | protected Object sendSearchRequest(Query query, Object incomingContext) {
this.query = query;
Client.NodeConnection nodeConnection = resourcePool.getConnection(node.key());
if (nodeConnection == null) {
responses.add(Client.ResponseOrError.fromError("Could not send search to unknown node " + node.key()));
responseAvailable();
return incomingContext;
}
query.trace(false, 5, "Sending search request with jrt/protobuf to node with dist key ", node.key());
var timeout = TimeoutHelper.calculateTimeout(query);
if (timeout.timedOut()) {
responses.add(Client.ResponseOrError.fromTimeoutError("Timeout before sending request to " + getName()));
responseAvailable();
return incomingContext;
}
RpcContext context = getContext(incomingContext, timeout.request());
nodeConnection.request(RPC_METHOD,
context.compressedPayload.type(),
context.compressedPayload.uncompressedSize(),
context.compressedPayload.data(),
this,
timeout.client());
return context;
} | class RpcSearchInvoker extends SearchInvoker implements Client.ResponseReceiver {
private static final String RPC_METHOD = "vespa.searchprotocol.search";
private final VespaBackEndSearcher searcher;
private final Node node;
private final RpcResourcePool resourcePool;
private final BlockingQueue<Client.ResponseOrError<ProtobufResponse>> responses;
private final int maxHits;
private Query query;
RpcSearchInvoker(VespaBackEndSearcher searcher, Node node, RpcResourcePool resourcePool, int maxHits) {
super(Optional.of(node));
this.searcher = searcher;
this.node = node;
this.resourcePool = resourcePool;
this.responses = new LinkedBlockingQueue<>(1);
this.maxHits = maxHits;
}
@Override
private RpcContext getContext(Object incomingContext, double requestTimeout) {
if (incomingContext instanceof RpcContext)
return (RpcContext)incomingContext;
return new RpcContext(resourcePool, query,
ProtobufSerialization.serializeSearchRequest(query,
Math.min(query.getHits(), maxHits),
searcher.getServerId(), requestTimeout));
}
@Override
protected InvokerResult getSearchResult(Execution execution) throws IOException {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
Client.ResponseOrError<ProtobufResponse> response = null;
try {
response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
}
if (response == null) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
if (response.timeout()) {
return errorResult(query, ErrorMessage.createTimeout(response.error().get()));
}
if (response.error().isPresent()) {
return errorResult(query, ErrorMessage.createBackendCommunicationError(response.error().get()));
}
if (response.response().isEmpty()) {
return errorResult(query, ErrorMessage.createInternalServerError("Neither error nor result available"));
}
ProtobufResponse protobufResponse = response.response().get();
CompressionType compression = CompressionType.valueOf(protobufResponse.compression());
byte[] payload = resourcePool.compressor().decompress(protobufResponse.compressedPayload(), compression, protobufResponse.uncompressedSize());
return ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
}
@Override
protected void release() {
}
public void receive(Client.ResponseOrError<ProtobufResponse> response) {
responses.add(response);
responseAvailable();
}
private String getName() {
return searcher.getName();
}
static class RpcContext {
final Compressor.Compression compressedPayload;
RpcContext(RpcResourcePool resourcePool, Query query, byte[] payload) {
compressedPayload = resourcePool.compress(query, payload);
}
}
} | class RpcSearchInvoker extends SearchInvoker implements Client.ResponseReceiver {
private static final String RPC_METHOD = "vespa.searchprotocol.search";
private final VespaBackEndSearcher searcher;
private final Node node;
private final RpcResourcePool resourcePool;
private final BlockingQueue<Client.ResponseOrError<ProtobufResponse>> responses;
private final int maxHits;
private Query query;
RpcSearchInvoker(VespaBackEndSearcher searcher, Node node, RpcResourcePool resourcePool, int maxHits) {
super(Optional.of(node));
this.searcher = searcher;
this.node = node;
this.resourcePool = resourcePool;
this.responses = new LinkedBlockingQueue<>(1);
this.maxHits = maxHits;
}
@Override
private RpcContext getContext(Object incomingContext, double requestTimeout) {
if (incomingContext instanceof RpcContext)
return (RpcContext)incomingContext;
return new RpcContext(resourcePool, query,
ProtobufSerialization.serializeSearchRequest(query,
Math.min(query.getHits(), maxHits),
searcher.getServerId(), requestTimeout));
}
@Override
protected InvokerResult getSearchResult(Execution execution) throws IOException {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
Client.ResponseOrError<ProtobufResponse> response = null;
try {
response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
}
if (response == null) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
if (response.timeout()) {
return errorResult(query, ErrorMessage.createTimeout(response.error().get()));
}
if (response.error().isPresent()) {
return errorResult(query, ErrorMessage.createBackendCommunicationError(response.error().get()));
}
if (response.response().isEmpty()) {
return errorResult(query, ErrorMessage.createInternalServerError("Neither error nor result available"));
}
ProtobufResponse protobufResponse = response.response().get();
CompressionType compression = CompressionType.valueOf(protobufResponse.compression());
byte[] payload = resourcePool.compressor().decompress(protobufResponse.compressedPayload(), compression, protobufResponse.uncompressedSize());
return ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
}
@Override
protected void release() {
}
public void receive(Client.ResponseOrError<ProtobufResponse> response) {
responses.add(response);
responseAvailable();
}
private String getName() {
return searcher.getName();
}
static class RpcContext {
final Compressor.Compression compressedPayload;
RpcContext(RpcResourcePool resourcePool, Query query, byte[] payload) {
compressedPayload = resourcePool.compress(query, payload);
}
}
} | |
"Timeout before sending request" ? | protected Object sendSearchRequest(Query query, Object incomingContext) {
this.query = query;
Client.NodeConnection nodeConnection = resourcePool.getConnection(node.key());
if (nodeConnection == null) {
responses.add(Client.ResponseOrError.fromError("Could not send search to unknown node " + node.key()));
responseAvailable();
return incomingContext;
}
query.trace(false, 5, "Sending search request with jrt/protobuf to node with dist key ", node.key());
var timeout = TimeoutHelper.calculateTimeout(query);
if (timeout.timedOut()) {
responses.add(Client.ResponseOrError.fromTimeoutError("Timeout while waiting for " + getName()));
responseAvailable();
return incomingContext;
}
RpcContext context = getContext(incomingContext, timeout.request());
nodeConnection.request(RPC_METHOD,
context.compressedPayload.type(),
context.compressedPayload.uncompressedSize(),
context.compressedPayload.data(),
this,
timeout.client());
return context;
} | responses.add(Client.ResponseOrError.fromTimeoutError("Timeout while waiting for " + getName())); | protected Object sendSearchRequest(Query query, Object incomingContext) {
this.query = query;
Client.NodeConnection nodeConnection = resourcePool.getConnection(node.key());
if (nodeConnection == null) {
responses.add(Client.ResponseOrError.fromError("Could not send search to unknown node " + node.key()));
responseAvailable();
return incomingContext;
}
query.trace(false, 5, "Sending search request with jrt/protobuf to node with dist key ", node.key());
var timeout = TimeoutHelper.calculateTimeout(query);
if (timeout.timedOut()) {
responses.add(Client.ResponseOrError.fromTimeoutError("Timeout before sending request to " + getName()));
responseAvailable();
return incomingContext;
}
RpcContext context = getContext(incomingContext, timeout.request());
nodeConnection.request(RPC_METHOD,
context.compressedPayload.type(),
context.compressedPayload.uncompressedSize(),
context.compressedPayload.data(),
this,
timeout.client());
return context;
} | class RpcSearchInvoker extends SearchInvoker implements Client.ResponseReceiver {
private static final String RPC_METHOD = "vespa.searchprotocol.search";
private final VespaBackEndSearcher searcher;
private final Node node;
private final RpcResourcePool resourcePool;
private final BlockingQueue<Client.ResponseOrError<ProtobufResponse>> responses;
private final int maxHits;
private Query query;
RpcSearchInvoker(VespaBackEndSearcher searcher, Node node, RpcResourcePool resourcePool, int maxHits) {
super(Optional.of(node));
this.searcher = searcher;
this.node = node;
this.resourcePool = resourcePool;
this.responses = new LinkedBlockingQueue<>(1);
this.maxHits = maxHits;
}
@Override
private RpcContext getContext(Object incomingContext, double requestTimeout) {
if (incomingContext instanceof RpcContext)
return (RpcContext)incomingContext;
return new RpcContext(resourcePool, query,
ProtobufSerialization.serializeSearchRequest(query,
Math.min(query.getHits(), maxHits),
searcher.getServerId(), requestTimeout));
}
@Override
protected InvokerResult getSearchResult(Execution execution) throws IOException {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
Client.ResponseOrError<ProtobufResponse> response = null;
try {
response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
}
if (response == null) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
if (response.timeout()) {
return errorResult(query, ErrorMessage.createTimeout(response.error().get()));
}
if (response.error().isPresent()) {
return errorResult(query, ErrorMessage.createBackendCommunicationError(response.error().get()));
}
if (response.response().isEmpty()) {
return errorResult(query, ErrorMessage.createInternalServerError("Neither error nor result available"));
}
ProtobufResponse protobufResponse = response.response().get();
CompressionType compression = CompressionType.valueOf(protobufResponse.compression());
byte[] payload = resourcePool.compressor().decompress(protobufResponse.compressedPayload(), compression, protobufResponse.uncompressedSize());
return ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
}
@Override
protected void release() {
}
public void receive(Client.ResponseOrError<ProtobufResponse> response) {
responses.add(response);
responseAvailable();
}
private String getName() {
return searcher.getName();
}
static class RpcContext {
final Compressor.Compression compressedPayload;
RpcContext(RpcResourcePool resourcePool, Query query, byte[] payload) {
compressedPayload = resourcePool.compress(query, payload);
}
}
} | class RpcSearchInvoker extends SearchInvoker implements Client.ResponseReceiver {
private static final String RPC_METHOD = "vespa.searchprotocol.search";
private final VespaBackEndSearcher searcher;
private final Node node;
private final RpcResourcePool resourcePool;
private final BlockingQueue<Client.ResponseOrError<ProtobufResponse>> responses;
private final int maxHits;
private Query query;
RpcSearchInvoker(VespaBackEndSearcher searcher, Node node, RpcResourcePool resourcePool, int maxHits) {
super(Optional.of(node));
this.searcher = searcher;
this.node = node;
this.resourcePool = resourcePool;
this.responses = new LinkedBlockingQueue<>(1);
this.maxHits = maxHits;
}
@Override
private RpcContext getContext(Object incomingContext, double requestTimeout) {
if (incomingContext instanceof RpcContext)
return (RpcContext)incomingContext;
return new RpcContext(resourcePool, query,
ProtobufSerialization.serializeSearchRequest(query,
Math.min(query.getHits(), maxHits),
searcher.getServerId(), requestTimeout));
}
@Override
protected InvokerResult getSearchResult(Execution execution) throws IOException {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
Client.ResponseOrError<ProtobufResponse> response = null;
try {
response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
}
if (response == null) {
return errorResult(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
}
if (response.timeout()) {
return errorResult(query, ErrorMessage.createTimeout(response.error().get()));
}
if (response.error().isPresent()) {
return errorResult(query, ErrorMessage.createBackendCommunicationError(response.error().get()));
}
if (response.response().isEmpty()) {
return errorResult(query, ErrorMessage.createInternalServerError("Neither error nor result available"));
}
ProtobufResponse protobufResponse = response.response().get();
CompressionType compression = CompressionType.valueOf(protobufResponse.compression());
byte[] payload = resourcePool.compressor().decompress(protobufResponse.compressedPayload(), compression, protobufResponse.uncompressedSize());
return ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
}
@Override
protected void release() {
}
public void receive(Client.ResponseOrError<ProtobufResponse> response) {
responses.add(response);
responseAvailable();
}
private String getName() {
return searcher.getName();
}
static class RpcContext {
final Compressor.Compression compressedPayload;
RpcContext(RpcResourcePool resourcePool, Query query, byte[] payload) {
compressedPayload = resourcePool.compress(query, payload);
}
}
} |
"Timed out prior to sending docsum request to " + nodeId | protected void sendFillRequest(Result result, String summaryClass) {
if (summaryClass != null) {
if (summaryClass.equals("")) {
summaryClass = null;
} else if (! documentDb.getDocsumDefinitionSet().hasDocsum(summaryClass)) {
throw new IllegalInputException("invalid presentation.summary=" + summaryClass);
}
}
ListMap<Integer, FastHit> hitsByNode = hitsByNode(result);
result.getQuery().trace(false, 5, "Sending ", hitsByNode.size(), " summary fetch requests with jrt/protobuf");
outstandingResponses = hitsByNode.size();
responses = new LinkedBlockingQueue<>(outstandingResponses);
var timeout = TimeoutHelper.calculateTimeout(result.getQuery());
if (timeout.timedOut()) {
hitsByNode.forEach((nodeId, hits) ->
receive(Client.ResponseOrError.fromTimeoutError("Timed out waiting for summary data from " + nodeId), hits));
return;
}
var builder = ProtobufSerialization.createDocsumRequestBuilder(
result.getQuery(), serverId, summaryClass, summaryNeedsQuery, timeout.request());
hitsByNode.forEach((nodeId, hits) -> {
var payload = ProtobufSerialization.serializeDocsumRequest(builder, hits);
sendDocsumsRequest(nodeId, hits, payload, result, timeout.client());
});
} | receive(Client.ResponseOrError.fromTimeoutError("Timed out waiting for summary data from " + nodeId), hits)); | protected void sendFillRequest(Result result, String summaryClass) {
if (summaryClass != null) {
if (summaryClass.equals("")) {
summaryClass = null;
} else if (! documentDb.getDocsumDefinitionSet().hasDocsum(summaryClass)) {
throw new IllegalInputException("invalid presentation.summary=" + summaryClass);
}
}
ListMap<Integer, FastHit> hitsByNode = hitsByNode(result);
result.getQuery().trace(false, 5, "Sending ", hitsByNode.size(), " summary fetch requests with jrt/protobuf");
outstandingResponses = hitsByNode.size();
responses = new LinkedBlockingQueue<>(outstandingResponses);
var timeout = TimeoutHelper.calculateTimeout(result.getQuery());
if (timeout.timedOut()) {
hitsByNode.forEach((nodeId, hits) ->
receive(Client.ResponseOrError.fromTimeoutError("Timed out prior to sending docsum request to " + nodeId), hits));
return;
}
var builder = ProtobufSerialization.createDocsumRequestBuilder(
result.getQuery(), serverId, summaryClass, summaryNeedsQuery, timeout.request());
hitsByNode.forEach((nodeId, hits) -> {
var payload = ProtobufSerialization.serializeDocsumRequest(builder, hits);
sendDocsumsRequest(nodeId, hits, payload, result, timeout.client());
});
} | class RpcProtobufFillInvoker extends FillInvoker {
private static final String RPC_METHOD = "vespa.searchprotocol.getDocsums";
private static final Logger log = Logger.getLogger(RpcProtobufFillInvoker.class.getName());
private final DocumentDatabase documentDb;
private final RpcResourcePool resourcePool;
private final boolean summaryNeedsQuery;
private final String serverId;
private BlockingQueue<Pair<Client.ResponseOrError<ProtobufResponse>, List<FastHit>>> responses;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
RpcProtobufFillInvoker(RpcResourcePool resourcePool, DocumentDatabase documentDb, String serverId, boolean summaryNeedsQuery) {
this.documentDb = documentDb;
this.resourcePool = resourcePool;
this.serverId = serverId;
this.summaryNeedsQuery = summaryNeedsQuery;
}
@Override
@Override
protected void getFillResults(Result result, String summaryClass) {
try {
processResponses(result, summaryClass);
result.hits().setSorted(false);
result.analyzeHits();
} catch (TimeoutException e) {
result.hits().addError(ErrorMessage.createTimeout("Summary data is incomplete: " + e.getMessage()));
}
}
@Override
protected void release() {
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.ResponseOrError<ProtobufResponse> response, List<FastHit> hitsContext) {
responses.add(new Pair<>(response, hitsContext));
}
/** Return a map of hits by their search node (partition) id */
private static ListMap<Integer, FastHit> hitsByNode(Result result) {
ListMap<Integer, FastHit> hitsByNode = new ListMap<>();
for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) {
Hit h = i.next();
if (!(h instanceof FastHit))
continue;
FastHit hit = (FastHit) h;
hitsByNode.put(hit.getDistributionKey(), hit);
}
return hitsByNode;
}
/** Send a docsums request to a node. Responses will be added to the given receiver. */
private void sendDocsumsRequest(int nodeId, List<FastHit> hits, byte[] payload, Result result,
double clientTimeout) {
Client.NodeConnection node = resourcePool.getConnection(nodeId);
if (node == null) {
String error = "Could not fill hits from unknown node " + nodeId;
receive(Client.ResponseOrError.fromError(error), hits);
result.hits().addError(ErrorMessage.createEmptyDocsums(error));
log.warning("Got hits with partid " + nodeId + ", which is not included in the current dispatch config");
return;
}
Query query = result.getQuery();
Compressor.Compression compressionResult = resourcePool.compress(query, payload);
node.request(RPC_METHOD, compressionResult.type(), payload.length, compressionResult.data(),
roe -> receive(roe, hits), clientTimeout);
}
private void processResponses(Result result, String summaryClass) throws TimeoutException {
try {
int skippedHits = 0;
while (outstandingResponses > 0) {
long timeLeftMs = result.getQuery().getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
var responseAndHits = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (responseAndHits == null) {
throwTimeout();
}
var response = responseAndHits.getFirst();
if (response.timeout()) {
throwTimeout();
}
var hitsContext = responseAndHits.getSecond();
skippedHits += processResponse(result, response, hitsContext, summaryClass);
outstandingResponses--;
}
if (skippedHits != 0) {
result.hits().addError(ErrorMessage
.createEmptyDocsums("Missing hit summary data for summary " + summaryClass + " for " + skippedHits + " hits"));
}
} catch (InterruptedException e) {
}
}
private int processResponse(Result result, Client.ResponseOrError<ProtobufResponse> responseOrError, List<FastHit> hitsContext,
String summaryClass) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) {
return 0;
}
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: " + error);
hasReportedError = true;
} else {
Client.ProtobufResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] responseBytes = resourcePool.compressor().decompress(response.compressedPayload(), compression,
response.uncompressedSize());
return fill(result, hitsContext, summaryClass, responseBytes);
}
return 0;
}
private void addErrors(Result result, com.yahoo.slime.Inspector errors) {
errors.traverse((ArrayTraverser) (index, value) -> {
int errorCode = ("timeout".equalsIgnoreCase(value.field("type").asString())) ? Error.TIMEOUT.code : Error.UNSPECIFIED.code;
result.hits().addError(new ErrorMessage(errorCode, value.field("message").asString(), value.field("details").asString()));
});
}
private void convertErrorsFromDocsumReply(Result target, List<SearchProtocol.Error> errors) {
for (var error : errors) {
target.hits().addError(ErrorMessage.createDocsumReplyError(error.getMessage()));
}
}
private int fill(Result result, List<FastHit> hits, String summaryClass, byte[] payload) {
try {
var protobuf = SearchProtocol.DocsumReply.parseFrom(payload);
var root = BinaryFormat.decode(protobuf.getSlimeSummaries().toByteArray()).get();
var errors = root.field("errors");
boolean hasErrors = errors.valid() && (errors.entries() > 0);
if (hasErrors) {
addErrors(result, errors);
}
convertErrorsFromDocsumReply(result, protobuf.getErrorsList());
Inspector summaries = new SlimeAdapter(root.field("docsums"));
if (!summaries.valid()) {
return 0;
}
int skippedHits = 0;
for (int i = 0; i < hits.size(); i++) {
Inspector summary = summaries.entry(i).field("docsum");
if (summary.valid()) {
hits.get(i).setField(Hit.SDDOCNAME_FIELD, documentDb.schema().name());
hits.get(i).addSummary(documentDb.getDocsumDefinitionSet().getDocsum(summaryClass), summary);
hits.get(i).setFilled(summaryClass);
} else {
skippedHits++;
}
}
return skippedHits;
} catch (InvalidProtocolBufferException ex) {
log.log(Level.WARNING, "Invalid response to docsum request", ex);
result.hits().addError(ErrorMessage.createInternalServerError("Invalid response to docsum request from backend"));
return 0;
}
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
} | class RpcProtobufFillInvoker extends FillInvoker {
private static final String RPC_METHOD = "vespa.searchprotocol.getDocsums";
private static final Logger log = Logger.getLogger(RpcProtobufFillInvoker.class.getName());
private final DocumentDatabase documentDb;
private final RpcResourcePool resourcePool;
private final boolean summaryNeedsQuery;
private final String serverId;
private BlockingQueue<Pair<Client.ResponseOrError<ProtobufResponse>, List<FastHit>>> responses;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
RpcProtobufFillInvoker(RpcResourcePool resourcePool, DocumentDatabase documentDb, String serverId, boolean summaryNeedsQuery) {
this.documentDb = documentDb;
this.resourcePool = resourcePool;
this.serverId = serverId;
this.summaryNeedsQuery = summaryNeedsQuery;
}
@Override
@Override
protected void getFillResults(Result result, String summaryClass) {
try {
processResponses(result, summaryClass);
result.hits().setSorted(false);
result.analyzeHits();
} catch (TimeoutException e) {
result.hits().addError(ErrorMessage.createTimeout("Summary data is incomplete: " + e.getMessage()));
}
}
@Override
protected void release() {
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.ResponseOrError<ProtobufResponse> response, List<FastHit> hitsContext) {
responses.add(new Pair<>(response, hitsContext));
}
/** Return a map of hits by their search node (partition) id */
private static ListMap<Integer, FastHit> hitsByNode(Result result) {
ListMap<Integer, FastHit> hitsByNode = new ListMap<>();
for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) {
Hit h = i.next();
if (!(h instanceof FastHit))
continue;
FastHit hit = (FastHit) h;
hitsByNode.put(hit.getDistributionKey(), hit);
}
return hitsByNode;
}
/** Send a docsums request to a node. Responses will be added to the given receiver. */
private void sendDocsumsRequest(int nodeId, List<FastHit> hits, byte[] payload, Result result,
double clientTimeout) {
Client.NodeConnection node = resourcePool.getConnection(nodeId);
if (node == null) {
String error = "Could not fill hits from unknown node " + nodeId;
receive(Client.ResponseOrError.fromError(error), hits);
result.hits().addError(ErrorMessage.createEmptyDocsums(error));
log.warning("Got hits with partid " + nodeId + ", which is not included in the current dispatch config");
return;
}
Query query = result.getQuery();
Compressor.Compression compressionResult = resourcePool.compress(query, payload);
node.request(RPC_METHOD, compressionResult.type(), payload.length, compressionResult.data(),
roe -> receive(roe, hits), clientTimeout);
}
private void processResponses(Result result, String summaryClass) throws TimeoutException {
try {
int skippedHits = 0;
while (outstandingResponses > 0) {
long timeLeftMs = result.getQuery().getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
var responseAndHits = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (responseAndHits == null) {
throwTimeout();
}
var response = responseAndHits.getFirst();
if (response.timeout()) {
throwTimeout();
}
var hitsContext = responseAndHits.getSecond();
skippedHits += processResponse(result, response, hitsContext, summaryClass);
outstandingResponses--;
}
if (skippedHits != 0) {
result.hits().addError(ErrorMessage
.createEmptyDocsums("Missing hit summary data for summary " + summaryClass + " for " + skippedHits + " hits"));
}
} catch (InterruptedException e) {
}
}
private int processResponse(Result result, Client.ResponseOrError<ProtobufResponse> responseOrError, List<FastHit> hitsContext,
String summaryClass) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) {
return 0;
}
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: " + error);
hasReportedError = true;
} else {
Client.ProtobufResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] responseBytes = resourcePool.compressor().decompress(response.compressedPayload(), compression,
response.uncompressedSize());
return fill(result, hitsContext, summaryClass, responseBytes);
}
return 0;
}
private void addErrors(Result result, com.yahoo.slime.Inspector errors) {
errors.traverse((ArrayTraverser) (index, value) -> {
int errorCode = ("timeout".equalsIgnoreCase(value.field("type").asString())) ? Error.TIMEOUT.code : Error.UNSPECIFIED.code;
result.hits().addError(new ErrorMessage(errorCode, value.field("message").asString(), value.field("details").asString()));
});
}
private void convertErrorsFromDocsumReply(Result target, List<SearchProtocol.Error> errors) {
for (var error : errors) {
target.hits().addError(ErrorMessage.createDocsumReplyError(error.getMessage()));
}
}
private int fill(Result result, List<FastHit> hits, String summaryClass, byte[] payload) {
try {
var protobuf = SearchProtocol.DocsumReply.parseFrom(payload);
var root = BinaryFormat.decode(protobuf.getSlimeSummaries().toByteArray()).get();
var errors = root.field("errors");
boolean hasErrors = errors.valid() && (errors.entries() > 0);
if (hasErrors) {
addErrors(result, errors);
}
convertErrorsFromDocsumReply(result, protobuf.getErrorsList());
Inspector summaries = new SlimeAdapter(root.field("docsums"));
if (!summaries.valid()) {
return 0;
}
int skippedHits = 0;
for (int i = 0; i < hits.size(); i++) {
Inspector summary = summaries.entry(i).field("docsum");
if (summary.valid()) {
hits.get(i).setField(Hit.SDDOCNAME_FIELD, documentDb.schema().name());
hits.get(i).addSummary(documentDb.getDocsumDefinitionSet().getDocsum(summaryClass), summary);
hits.get(i).setFilled(summaryClass);
} else {
skippedHits++;
}
}
return skippedHits;
} catch (InvalidProtocolBufferException ex) {
log.log(Level.WARNING, "Invalid response to docsum request", ex);
result.hits().addError(ErrorMessage.createInternalServerError("Invalid response to docsum request from backend"));
return 0;
}
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
} |
Nice! This went under my radar. | public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
<instance id="default">
<prod>
<region active="true">aws-us-east-1a</region>
<region active="true">ap-northeast-1</region>
</prod>
</instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
} | ); | public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
<instance id="default">
<prod>
<region active="true">aws-us-east-1a</region>
<region active="true">ap-northeast-1</region>
</prod>
</instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@Before
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.us-east-1.test.z.vespa-app.cloud",
"*.default.default.us-east-1.test.z.vespa-app.cloud",
"default.default.us-east-3.staging.z.vespa-app.cloud",
"*.default.default.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
public void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
@Test
public void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2,
InstanceName.from("main"), 8))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1,
InstanceName.from("main"), 1))
.applicationEndpoint("c", "qrs", zone1.region().value(),
Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6))
.build();
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.us-east-1.test.z.vespa-app.cloud",
"*.a1.t1.us-east-1.test.z.vespa-app.cloud",
"a1.t1.us-east-3.staging.z.vespa-app.cloud",
"*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@Before
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.us-east-1.test.z.vespa-app.cloud",
"*.default.default.us-east-1.test.z.vespa-app.cloud",
"default.default.us-east-3.staging.z.vespa-app.cloud",
"*.default.default.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
// Metadata already stored in curator, backed by matching secrets in the secret store,
// should be returned as-is instead of triggering a new certificate request.
@Test
public void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
// Secrets exist at the same version (7) as the stored metadata, so validation succeeds.
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
// Names and version come back unchanged, i.e. the stored metadata was reused.
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
public void reprovisions_certificate_when_necessary() {
    // Stored metadata carries version -1 (presumably a "must reprovision" marker — inferred from the
    // test name) with an empty SAN list, so getMetadata should write fresh metadata at version 0.
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
    var metadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(metadata.isPresent());
    assertEquals(0, metadata.get().version());
    // The reprovisioned metadata must also have been persisted back to curator.
    assertEquals(metadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
// Deploying to a second prod zone requires SANs not covered by the stored certificate:
// a new leaf request is made (new version, new leaf request id) while the root request id is kept.
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
// Secrets for both the old (-1) and the re-provisioned (0) version are made available.
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
// Root request id survives the reprovisioning; only the leaf request changes.
assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
// Fix: the @Test annotation was duplicated; @Test is not @Repeatable, so repeating it
// on one method is a compile error. A single annotation is kept.
@Test
public void includes_application_endpoint_when_declared() {
    // Application (cross-instance) endpoints declared in the deployment spec must contribute
    // r.vespa-app.cloud SANs to the requested certificate.
    Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
    ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
    ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,main")
            .region(zone1.region())
            .region(zone2.region())
            .applicationEndpoint("a", "qrs", zone2.region().value(),
                                 Map.of(InstanceName.from("beta"), 2,
                                        InstanceName.from("main"), 8))
            .applicationEndpoint("b", "qrs", zone2.region().value(),
                                 Map.of(InstanceName.from("beta"), 1,
                                        InstanceName.from("main"), 1))
            .applicationEndpoint("c", "qrs", zone1.region().value(),
                                 Map.of(InstanceName.from("beta"), 4,
                                        InstanceName.from("main"), 6))
            .build();
    ControllerTester tester = new ControllerTester(SystemName.Public);
    EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
    EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
    List<String> expectedSans = List.of(
            "vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
            "a1.t1.g.vespa-app.cloud",
            "*.a1.t1.g.vespa-app.cloud",
            "a1.t1.aws-us-west-2a.r.vespa-app.cloud",
            "*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
            "a1.t1.aws-us-east-1c.r.vespa-app.cloud",
            "*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
            "a1.t1.aws-us-east-1c.z.vespa-app.cloud",
            "*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
            "a1.t1.us-east-1.test.z.vespa-app.cloud",
            "*.a1.t1.us-east-1.test.z.vespa-app.cloud",
            "a1.t1.us-east-3.staging.z.vespa-app.cloud",
            "*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
    );
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
    assertTrue(endpointCertificateMetadata.isPresent());
    assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
    assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
} |
Consider matching every value as switch expression on enum is exhaustive, i.e. not matching a value is a compile error if there isn't a `default` clause. | private static Object decodePayload(Inspector entry) {
return switch (entry.type()) {
case STRING -> entry.asString();
case LONG -> entry.asLong();
case BOOL -> entry.asBool();
case DOUBLE -> entry.asDouble();
case DATA -> entry.asData();
default -> null;
};
} | default -> null; | private static Object decodePayload(Inspector entry) {
return switch (entry.type()) {
case STRING -> entry.asString();
case LONG -> entry.asLong();
case BOOL -> entry.asBool();
case DOUBLE -> entry.asDouble();
case DATA -> entry.asData();
default -> null;
};
} | class SlimeTraceDeserializer {
private final Inspector entry;
/** Creates a deserializer reading from the given slime inspector (the serialized root trace node). */
public SlimeTraceDeserializer(Inspector inspector) {
this.entry = inspector;
}
/** Deserializes the root entry (and, recursively, all its children) into a TraceNode tree. */
public TraceNode deserialize() {
return deserialize(entry);
}
/** Rebuilds one TraceNode from its serialized payload and timestamp, then recurses into its children. */
private static TraceNode deserialize(Inspector entry) {
    final TraceNode result = new TraceNode(decodePayload(entry.field(SlimeTraceSerializer.PAYLOAD)),
                                           decodeTimestamp(entry.field(SlimeTraceSerializer.TIMESTAMP)));
    // Children are stored as a slime array; attach each deserialized child in order.
    entry.field(SlimeTraceSerializer.CHILDREN).traverse(new ArrayTraverser() {
        @Override
        public void entry(int index, Inspector child) {
            result.add(deserialize(child));
        }
    });
    return result;
}
/** Reads the timestamp field as a long (0 if the field is missing — slime's default for asLong). */
private static long decodeTimestamp(Inspector entry) {
return entry.asLong();
}
} | class SlimeTraceDeserializer {
private final Inspector entry;
/** Creates a deserializer reading from the given slime inspector (the serialized root trace node). */
public SlimeTraceDeserializer(Inspector inspector) {
this.entry = inspector;
}
/** Deserializes the root entry (and, recursively, all its children) into a TraceNode tree. */
public TraceNode deserialize() {
return deserialize(entry);
}
/** Rebuilds one TraceNode from its serialized form, recursing into the CHILDREN array. */
private static TraceNode deserialize(Inspector entry) {
Object payload = decodePayload(entry.field(SlimeTraceSerializer.PAYLOAD));
long timestamp = decodeTimestamp(entry.field(SlimeTraceSerializer.TIMESTAMP));
// Effectively final so the anonymous traverser below can capture it.
final TraceNode node = new TraceNode(payload, timestamp);
Inspector children = entry.field(SlimeTraceSerializer.CHILDREN);
children.traverse(new ArrayTraverser() {
@Override
public void entry(int idx, Inspector inspector) {
node.add(deserialize(inspector));
}
});
return node;
}
/** Reads the timestamp field as a long (0 if the field is missing — slime's default for asLong). */
private static long decodeTimestamp(Inspector entry) {
return entry.asLong();
}
} |
Won't this always be true if deployTime is present? ```suggestion if (deployTime.isPresent() && Instant.now().isBefore(deployTime.get().plus(Duration.ofMinutes(1)))) ``` | public HttpResponse getLogs(String logServerUri, Optional<Instant> deployTime) {
HttpGet get = new HttpGet(logServerUri);
try {
return new ProxyResponse(httpClient.execute(get));
} catch (IOException e) {
if (deployTime.isPresent() && Instant.now().isAfter(deployTime.get().minus(Duration.ofMinutes(1))))
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write("".getBytes(StandardCharsets.UTF_8));
}
};
return HttpErrorResponse.internalServerError("Failed to get logs: " + Exceptions.toMessageString(e));
}
} | if (deployTime.isPresent() && Instant.now().isAfter(deployTime.get().minus(Duration.ofMinutes(1)))) | public HttpResponse getLogs(String logServerUri, Optional<Instant> deployTime) {
HttpGet get = new HttpGet(logServerUri);
try {
return new ProxyResponse(httpClient.execute(get));
} catch (IOException e) {
if (deployTime.isPresent() && Instant.now().isBefore(deployTime.get().plus(Duration.ofMinutes(1))))
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write("".getBytes(StandardCharsets.UTF_8));
}
};
return HttpErrorResponse.internalServerError("Failed to get logs: " + Exceptions.toMessageString(e));
}
} | class LogRetriever {
private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
} | class LogRetriever {
private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
} |
Oh OK, you're actually not changing anything other than fixing the locking here | public Node setReady(NodeMutex nodeMutex, Agent agent, String reason) {
Node node = nodeMutex.node();
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (!node.status().wantToDeprovision())
node = node.withWantToRetire(false, false, false, agent, clock.instant());
return db.writeTo(Node.State.ready, node, agent, Optional.of(reason));
} | if (!node.status().wantToDeprovision()) | public Node setReady(NodeMutex nodeMutex, Agent agent, String reason) {
Node node = nodeMutex.node();
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (!node.status().wantToDeprovision())
node = node.withWantToRetire(false, false, false, agent, clock.instant());
return db.writeTo(Node.State.ready, node, agent, Optional.of(reason));
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final CuratorDatabaseClient db;
private final Zone zone;
private final Clock clock;
private final Orchestrator orchestrator;
private final Applications applications;
/** Creates the node collection backed by the given curator client, for the given zone. */
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
this.zone = zone;
this.clock = clock;
this.db = db;
this.orchestrator = orchestrator;
this.applications = applications;
}
/** Reads and re-writes every node, in every state, so all are persisted in the newest serialization format. */
public void rewrite() {
    Instant started = clock.instant();
    int written = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodesInState = db.readNodes(state);
        db.writeTo(state, nodesInState, Agent.system, Optional.empty());
        written += nodesInState.size();
    }
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", written, Duration.between(started, clock.instant())));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found.
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
 * Returns a list of nodes in this repository in any of the given states.
 * Note: this reads all nodes from the database on every call.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 */
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository; the caller must already hold the given lock. */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
// With 5 or fewer active nodes the down-ratio is too noisy to be meaningful; assume healthy.
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
// Healthy iff at most 20% of active nodes are observed down.
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Validates and adds newly created reserved (child) nodes to the node repository. */
public List<Node> addReservedNodes(LockedNodeList nodes) {
    for (Node candidate : nodes) {
        // Only docker containers (child nodes) may enter the repository directly in reserved.
        if (candidate.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            illegal("Cannot add " + candidate + ": This is not a child node");
        if (candidate.allocation().isEmpty())
            illegal("Cannot add " + candidate + ": Child nodes need to be allocated");
        if (node(candidate.hostname()).isPresent())
            illegal("Cannot add " + candidate + ": A node with this name already exists");
    }
    return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
// Reject duplicates within the argument list itself (O(n^2), fine for node-add batch sizes).
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
// Only a deprovisioned node may be re-added; carry over its history, reports,
// fail count and firmware-verification timestamp into the new node.
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
// If the old node was being rebuilt, keep the retire/rebuild flags alive on the new node.
boolean rebuilding = existing.get().status().wantToRebuild();
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
rebuilding));
}
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
// Add the new nodes and remove their deprovisioned predecessors atomically.
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
}
/** Sets a node to ready and returns the node in the ready state */
/** Reserve nodes for an application. This method does <b>not</b> lock the node repository; the caller must. */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes as part of the given transaction. This method does <b>not</b> lock the node repository. */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state
 * @param reusable if true, the node is moved directly back to a reusable state
 *                 (presumably {@link Node.State#dirty} — the original javadoc was truncated here; confirm)
 */
public void setRemovable(ApplicationId application, List<Node> nodes, boolean reusable) {
try (Mutex lock = applications.lock(application)) {
List<Node> removableNodes = nodes.stream()
.map(node -> node.with(node.allocation().get().removable(true, reusable)))
.toList();
write(removableNodes, lock);
}
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
// Outside production (or in CD systems) everything is simply deallocated.
if ( ! zone.environment().isProduction() || zone.system().isCd())
return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
// In production: stateless nodes are deallocated; stateful nodes either go to dirty
// (when marked reusable) or are kept inactive so their data can be reused later.
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
var statefulToInactive = stateful.not().reusable();
var statefulToDirty = stateful.reusable();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/** Fails the given nodes in a self-contained transaction and returns them in their new state. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction tx = new NestedTransaction();
    List<Node> failed = fail(nodes, agent, reason, tx);
    tx.commit();
    return failed;
}
/** Moves the nodes to failed within the given transaction, clearing their wantToFail flag. */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
nodes = nodes.stream()
// NOTE(review): wantToFail is set to FALSE here — presumably clearing the request
// because the node is now actually failed; confirm against Node.withWantToFail semantics.
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Moves the given nodes to the dirty state, taking the appropriate per-node lock for each. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Deallocates this host (and, for hosts, all its children), throwing if any node may not be dirtied. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
    Node target = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " +
                                                                                hostname + ": Node not found"));
    // Hosts drag their children along; nodes already dirty are skipped.
    Stream<Node> candidates = target.type().isHost()
            ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(target))
            : Stream.of(target);
    List<Node> nodesToDirty = candidates.filter(node -> node.state() != Node.State.dirty).toList();
    List<String> blocked = nodesToDirty.stream()
                                       .filter(node -> node.state() != Node.State.provisioned)
                                       .filter(node -> node.state() != Node.State.failed)
                                       .filter(node -> node.state() != Node.State.parked)
                                       .filter(node -> node.state() != Node.State.breakfixed)
                                       .map(Node::hostname).toList();
    if ( ! blocked.isEmpty())
        illegal("Could not deallocate " + target + ": " +
                blocked + " are not in states [provisioned, failed, parked, breakfixed]");
    return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Sets a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction tx = new NestedTransaction();
    Node deallocated = deallocate(node, agent, reason, tx);
    tx.commit();
    return deallocated;
}
/** Deallocates each of the given nodes within the given transaction, returning them in their new state. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    List<Node> deallocated = new ArrayList<>(nodes.size());
    for (Node node : nodes)
        deallocated.add(deallocate(node, agent, reason, transaction));
    return deallocated;
}
/**
 * Deallocates one node within the given transaction: parks it when parkOnDeallocationOf says so,
 * otherwise moves it to dirty. Throws if a parked node slated for deprovision/rebuild would be dirtied.
 */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
return park(node.hostname(), false, agent, reason, transaction);
} else {
Node.State toState = Node.State.dirty;
if (node.state() == Node.State.parked) {
// A parked node pending deprovision or rebuild must not be recycled to dirty.
if (node.status().wantToDeprovision()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being deprovisioned");
if (node.status().wantToRebuild()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being rebuilt");
}
return db.writeTo(toState, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
}
/**
 * Fails this node (without requesting deprovisioning) and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
return fail(hostname, false, agent, reason);
}
/** Fails this node, optionally also marking it for deprovisioning, and returns it in its new state. */
public Node fail(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, wantToDeprovision, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
// The host itself: fail outright only when none of its children remain active.
if (children.state(Node.State.active).isEmpty())
changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
/** Marks an active node as wanting to fail; moves a non-active node directly to failed. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() != Node.State.active)
        return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason));
    Node marked = node.withWantToFail(true, agent, clock.instant());
    write(marked, lock);
    return marked;
}
/**
 * Parks this node in its own transaction and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
    NestedTransaction tx = new NestedTransaction();
    Node parked = park(hostname, wantToDeprovision, agent, reason, tx);
    tx.commit();
    return parked;
}
/** Parks this node as part of the given transaction, optionally also marking it for deprovisioning. */
private Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, wantToDeprovision, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself,
 * in a single transaction.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.active, agent, false, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children, in a single transaction.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
// NOTE(review): the node is read before the unallocated lock is taken; re-reading it
// under the lock would close the window for concurrent modification — confirm intent.
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, false, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
/** Moves all children of this hostname, then the host itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList())
        moved.add(move(child.hostname(), toState, agent, false, reason, transaction));
    moved.add(move(hostname, toState, agent, false, reason, transaction));
    transaction.commit();
    return moved;
}
/** Moves a node to the given state in its own transaction and returns it in that state. */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason) {
    NestedTransaction tx = new NestedTransaction();
    Node moved = move(hostname, toState, agent, wantToDeprovision, reason, tx);
    tx.commit();
    return moved;
}
/**
 * Moves a node to the given state as part of a transaction, holding the node's lock for the duration.
 * Activation is validated against conflicting active allocations; deprovisioned nodes lose their IP config.
 */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
// No two active nodes of the same owner may share cluster id and membership index.
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (wantToDeprovision)
node = node.withWantToRetire(wantToDeprovision, wantToDeprovision, agent, clock.instant());
if (toState == Node.State.deprovisioned) {
// Deprovisioned nodes keep their record but release their IP addresses.
node = node.with(IP.Config.EMPTY);
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
try (NodeMutex nodeMutex = lockAndGetRequired(hostname)) {
Node node = nodeMutex.node();
if (node.type() == NodeType.tenant) {
// Tenant (container) nodes are simply deleted; they will be re-created when needed.
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
return node;
}
if (node.state() == Node.State.ready) return node;
// Refuse to ready a node whose (parent) host has hard failures reported against it.
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
if (!failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(nodeMutex, agent, reason);
}
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    return removeRecursively(requireNode(hostname), false);
}
/**
 * Removes this node and (for hosts) its children in one transaction, after validating removability.
 * In dynamically provisioned zones hosts are deleted outright; otherwise they are kept as deprovisioned.
 */
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
if (zone.cloud().dynamicProvisioning()) {
db.removeNodes(List.of(node), transaction);
} else {
// Keep the host's record around as deprovisioned so history survives re-adding it.
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
// A rebuilding node's record is still needed to complete the rebuild.
if (node.status().wantToRebuild())
throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
}
/** Validates and removes all children of this node within the given transaction, returning them in a mutable list. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = list().childrenOf(node).asList();
    for (Node child : children)
        requireRemovable(child, true, force);
    db.removeNodes(children, transaction);
    return new ArrayList<>(children);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node:
 * - non-recursively: node is unallocated
 * - recursively: node is unallocated or node is in failed|parked
 * - Host node: iff in state provisioned|failed|parked
 * - Child node:
 * - non-recursively: node in state ready
 * - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return;
// Allocated tenant nodes may only be removed as part of a recursive removal, and only when failed/parked.
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
// Note: this declaration intentionally shadows nothing — the one above is scoped to the if-block.
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if the given node cannot be breakfixed.
 * Breakfix is allowed iff:
 * - the node is a tenant host,
 * - the zone does not use dynamic provisioning, and
 * - the node is in the parked or failed state.
 */
private void requireBreakfixable(Node node) {
    if (zone.cloud().dynamicProvisioning()) {
        illegal("Can not breakfix in zone: " + zone);
    }
    if (node.type() != NodeType.host) {
        illegal(node + " can not be breakfixed as it is not a tenant host");
    }
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        // Fix: message previously said "can not be removed" — copy-paste from requireRemovable;
        // this guard is about breakfixing, so say so.
        illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
}
/**
 * Increases the restart generation of any nodes matching the given filter.
 * Note: assumes matched nodes have an allocation (allocation().get() below).
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @param version the wanted OS version; empty clears the wanted version
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> write(node.withWantedOsVersion(version), lock));
}
/** Retire nodes matching given filter, recording the given agent and time as the cause */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, HostOperation.deprovision, agent, instant);
}
/** Rebuild given host; a soft rebuild preserves data, a full rebuild does not */
public List<Node> rebuild(String hostname, boolean soft, Agent agent, Instant instant) {
    return decommission(hostname, soft ? HostOperation.softRebuild : HostOperation.rebuild, agent, instant);
}
/**
 * Marks the host (and, when retirement is needed, its children) for the given decommissioning
 * operation. Returns all nodes that were updated, or an empty list if the host does not exist.
 */
private List<Node> decommission(String hostname, HostOperation op, Agent agent, Instant instant) {
    Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
    if (nodeMutex.isEmpty()) return List.of();
    Node host = nodeMutex.get().node();
    if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
    boolean wantToDeprovision = op == HostOperation.deprovision;
    boolean wantToRebuild = op == HostOperation.rebuild || op == HostOperation.softRebuild;
    boolean wantToRetire = op.needsRetirement();
    List<Node> result = new ArrayList<>();
    // Hold both the host's lock and the unallocated lock while writing the host itself
    try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
        Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
        result.add(write(newHost, lock));
    }
    if (wantToRetire) {
        // Children are updated after the host lock is released; performOn acquires per-application locks
        List<Node> updatedNodes = performOn(list().childrenOf(host), (node, nodeLock) -> {
            Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
            return write(newNode, nodeLock);
        });
        result.addAll(updatedNodes);
    }
    return result;
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; only required as proof of locking, hence unused
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Convenience overload: performs the action on all nodes matching the given predicate. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 * Nodes are grouped by the application whose lock governs them (or the unallocated lock),
 * and each node is re-read from the database under its lock before the action is applied,
 * so the action always sees a fresh node. Nodes deleted in the meantime are skipped.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    // Group by the lock that governs each node
    for (Node node : nodes) {
        Optional<ApplicationId> applicationId = applicationIdForLock(node);
        if (applicationId.isPresent())
            allocatedNodes.put(applicationId.get(), node);
        else
            unallocatedNodes.add(node);
    }
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = applications.lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/** Returns whether a tenant node can currently be allocated on the given host. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.cloud().dynamicProvisioning());
}
/**
 * Returns whether a tenant node can be allocated on the given host, for an explicit
 * dynamic-provisioning setting. Retiring, retired or suspended hosts never accept allocations.
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    if (suspended(host)) return false;
    // With dynamic provisioning we may allocate to hosts that are not yet active
    if (dynamicProvisioning)
        return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
    else
        return host.state() == Node.State.active;
}
/** Returns whether the orchestrator reports this node as suspended; unknown hosts count as not suspended. */
public boolean suspended(Node node) {
    try {
        return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
    } catch (HostNameNotFoundException e) {
        // Intentionally ignored: a host unknown to the orchestrator is treated as not suspended
        return false;
    }
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 * Because a tenant node's governing lock depends on its owner, which may change between
 * reading the node and acquiring the lock, this retries (up to 4 times) until the node
 * read under the lock has the same owner as the node the lock was chosen for.
 */
private Optional<NodeMutex> lockAndGet(Node node, Optional<Duration> timeout) {
    Node staleNode = node;
    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode, timeout);
        try {
            // Re-read the node now that we hold (what we believe is) its lock
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }
            // Only tenant nodes can change owner; for all other types the lock is stable
            if (node.type() != NodeType.tenant ||
                Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // Ownership transferred to the returned NodeMutex
                return Optional.of(nodeMutex);
            }
            // Owner changed under us: retry with the fresh node so we pick the right lock
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }
    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock, or empty if the node does not exist. */
public Optional<NodeMutex> lockAndGet(String hostname) {
    return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(String hostname, Duration timeout) {
    return node(hostname).flatMap(node -> lockAndGet(node, Optional.of(timeout)));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) { return lockAndGet(node, Optional.empty()); }
/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(Node node, Duration timeout) { return lockAndGet(node, Optional.of(timeout)); }
/** As {@link #lockAndGet(Node)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** As {@link #lockAndGet(String)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** As {@link #lockAndGet(String, Duration)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(String hostname, Duration timeout) {
    return lockAndGet(hostname, timeout).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Acquires the lock governing the given node: its application's lock if allocated, otherwise the unallocated lock. */
private Mutex lock(Node node, Optional<Duration> timeout) {
    Optional<ApplicationId> application = applicationIdForLock(node);
    if (application.isPresent())
        return timeout.map(t -> applications.lock(application.get(), t))
                      .orElseGet(() -> applications.lock(application.get()));
    else
        // NOTE(review): the timeout is ignored here — both branches call lockInactive(); confirm whether intended
        return timeout.map(db::lockInactive).orElseGet(db::lockInactive);
}
/** Returns the node with the given hostname, or throws NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
    return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/**
 * Returns the application ID that should be used for locking when modifying this node.
 * Empty only for unallocated tenant nodes, which are governed by the unallocated lock instead.
 */
private static Optional<ApplicationId> applicationIdForLock(Node node) {
    return switch (node.type()) {
        case tenant         -> node.allocation().map(Allocation::owner);
        case host           -> Optional.of(InfrastructureApplication.TENANT_HOST.id());
        case config         -> Optional.of(InfrastructureApplication.CONFIG_SERVER.id());
        case confighost     -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST.id());
        case controller     -> Optional.of(InfrastructureApplication.CONTROLLER.id());
        case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST.id());
        case proxy          -> Optional.of(InfrastructureApplication.PROXY.id());
        case proxyhost      -> Optional.of(InfrastructureApplication.PROXY_HOST.id());
    };
}
/** Throws an IllegalArgumentException with the given message. */
private static void illegal(String message) {
    throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked (rather than moved to dirty) when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // Already parked
    if (agent == Agent.operator) return false;           // Operators explicitly choose the target state
    if (node.type() == NodeType.tenant && node.status().wantToDeprovision()) return false;
    // Park when retirement was explicitly requested by an operator
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           retirementRequestedByOperator;
}
/** The kinds of decommissioning operations that can be applied to a host. */
private enum HostOperation {
    /** Host is deprovisioned and data is destroyed */
    deprovision(true),
    /** Host is deprovisioned, the same host is later re-provisioned and data is destroyed */
    rebuild(true),
    /** Host is stopped and re-bootstrapped, data is preserved */
    softRebuild(false);
    private final boolean needsRetirement;
    HostOperation(boolean needsRetirement) {
        this.needsRetirement = needsRetirement;
    }
    /** Returns whether this operation requires the host and its children to be retired */
    public boolean needsRetirement() {
        return needsRetirement;
    }
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
// Persistent store of all nodes
private final CuratorDatabaseClient db;
// The zone this repository manages
private final Zone zone;
private final Clock clock;
// Source of node suspension status
private final Orchestrator orchestrator;
// Provides per-application locks
private final Applications applications;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
    this.zone = zone;
    this.clock = clock;
    this.db = db;
    this.orchestrator = orchestrator;
    this.applications = applications;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
    Instant start = clock.instant();
    int nodesWritten = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodes = db.readNodes(state);
        // Writing the nodes back re-serializes them in the current format
        db.writeTo(state, nodes, Agent.system, Optional.empty());
        nodesWritten += nodes.size();
    }
    Instant end = clock.instant();
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
    return db.readNode(hostname, inState);
}
/**
 * Returns a list of nodes in this repository in any of the given states
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 */
public NodeList list(Node.State... inState) {
    return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository; the lock is proof that the caller holds it */
public LockedNodeList list(Mutex lock) {
    return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList active = list(Node.State.active);
    if (active.size() <= 5) return true; // Too few nodes to draw any conclusion
    double downRatio = (double) active.down().size() / (double) active.size();
    return downRatio <= 0.2; // More than 20% down indicates a zone-wide problem
}
/** Adds a list of newly created reserved nodes (allocated container children) to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
    for (Node node : nodes) {
        // Only container (child) nodes may enter the repository directly in reserved state
        if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            illegal("Cannot add " + node + ": This is not a child node");
        if (node.allocation().isEmpty())
            illegal("Cannot add " + node + ": Child nodes need to be allocated");
        Optional<Node> existing = node(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists");
    }
    return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd = new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);
            // Reject duplicates within the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != Node.State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Carry over history, reports, fail count and firmware check from the deprovisioned node
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                boolean rebuilding = existing.get().status().wantToRebuild();
                if (rebuilding) {
                    // Keep the rebuild flag so the rebuild completes on the re-added node
                    node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                   false,
                                                                   rebuilding));
                }
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        // Add new and remove replaced deprovisioned nodes in one transaction
        NestedTransaction transaction = new NestedTransaction();
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
        db.removeNodes(nodesToRemove, transaction);
        transaction.commit();
        return resultingNodes;
    }
}
/** Reserve nodes. This method does <b>not</b> lock the node repository. */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository. */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state
 * @param reusable whether the node can be moved directly to {@link Node.State#dirty} when deactivated
 */
public void setRemovable(ApplicationId application, List<Node> nodes, boolean reusable) {
    try (Mutex lock = applications.lock(application)) {
        List<Node> removableNodes = nodes.stream()
                                         .map(node -> node.with(node.allocation().get().removable(true, reusable)))
                                         .toList();
        write(removableNodes, lock);
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    // Outside production (or in CD systems) nodes are always recycled immediately
    if ( ! zone.environment().isProduction() || zone.system().isCd())
        return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
    var stateless = NodeList.copyOf(nodes).stateless();
    var stateful  = NodeList.copyOf(nodes).stateful();
    var statefulToInactive  = stateful.not().reusable();
    var statefulToDirty = stateful.reusable();
    List<Node> written = new ArrayList<>();
    // Stateless and reusable stateful nodes are recycled; other stateful nodes are kept inactive
    // so their data can be reused if the application is redeployed
    written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
    written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
    written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction.nested()));
    return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
    return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/** Fails these nodes immediately, committing the change in its own transaction. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit();
    return nodes;
}
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    // Clear the wantToFail flag as the nodes are now actually moved to failed
    nodes = nodes.stream()
                 .map(n -> n.withWantToFail(false, agent, clock.instant()))
                 .collect(Collectors.toList());
    return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state (or parked, when {@code parkOnDeallocationOf} applies) */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Deallocates the given node and, if it is a host, all of its children. All must be in a deallocatable state. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " +
                                                                                     hostname + ": Node not found"));
    // For hosts, include all children; nodes already dirty are skipped
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty)).filter(node -> node.state() != Node.State.dirty).toList();
    // Validate all nodes up front so the operation is all-or-nothing
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                          .filter(node -> node.state() != Node.State.provisioned)
                                                          .filter(node -> node.state() != Node.State.failed)
                                                          .filter(node -> node.state() != Node.State.parked)
                                                          .filter(node -> node.state() != Node.State.breakfixed)
                                                          .map(Node::hostname).toList();
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
    return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node deallocated = deallocate(node, agent, reason, transaction);
    transaction.commit();
    return deallocated;
}
/** Deallocates each of the given nodes as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
/** Deallocates the given node as part of the given transaction, parking it instead when required. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
    if (parkOnDeallocationOf(node, agent)) {
        return park(node.hostname(), false, agent, reason, transaction);
    } else {
        Node.State toState = Node.State.dirty;
        if (node.state() == Node.State.parked) {
            // Parked nodes pending deprovision/rebuild must not be recycled prematurely
            if (node.status().wantToDeprovision()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being deprovisioned");
            if (node.status().wantToRebuild()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being rebuilt");
        }
        return db.writeTo(toState, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return fail(hostname, false, agent, reason);
}
/** As {@link #fail(String, Agent, String)}, optionally also marking the node for deprovisioning. */
public Node fail(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, wantToDeprovision, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
    NodeList children = list().childrenOf(hostname);
    List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
    if (children.state(Node.State.active).isEmpty())
        changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason)));
    else
        // Host still has active children: only mark it, so it is failed later when they are gone
        changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
    return changed;
}
/** Marks an active node as wantToFail; fails any other node immediately. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() == Node.State.active) {
        node = node.withWantToFail(true, agent, clock.instant());
        write(node, lock);
        return node;
    } else {
        return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason));
    }
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node parked = park(hostname, wantToDeprovision, agent, reason, transaction);
    transaction.commit();
    return parked;
}
/** Parks this node as part of the given transaction. */
private Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason, NestedTransaction transaction) {
    return move(hostname, Node.State.parked, agent, wantToDeprovision, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, false, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 *
 * @return the removed children followed by the breakfixed host, in their new states
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node);
        // Remove children and move the host in a single transaction
        NestedTransaction transaction = new NestedTransaction();
        List<Node> removed = removeChildren(node, false, transaction);
        removed.add(move(node.hostname(), Node.State.breakfixed, agent, false, Optional.of(reason), transaction));
        transaction.commit();
        return removed;
    }
}
/** Moves all children of the given host, then the host itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = list().childrenOf(hostname).asList().stream()
                             .map(child -> move(child.hostname(), toState, agent, false, reason, transaction))
                             .collect(Collectors.toList());
    moved.add(move(hostname, toState, agent, false, reason, transaction));
    transaction.commit();
    return moved;
}
/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node moved = move(hostname, toState, agent, wantToDeprovision, reason, transaction);
    transaction.commit();
    return moved;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason, NestedTransaction transaction) {
    try (NodeMutex lock = lockAndGetRequired(hostname)) {
        Node node = lock.node();
        if (toState == Node.State.active) {
            if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
            // Refuse activation that would duplicate a (cluster, index) already active for this owner
            for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        if (wantToDeprovision)
            node = node.withWantToRetire(wantToDeprovision, wantToDeprovision, agent, clock.instant());
        if (toState == Node.State.deprovisioned) {
            node = node.with(IP.Config.EMPTY); // Deprovisioned nodes keep no IP configuration
        }
        return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    try (NodeMutex nodeMutex = lockAndGetRequired(hostname)) {
        Node node = nodeMutex.node();
        if (node.type() == NodeType.tenant) {
            // Tenant (container) nodes are simply deleted; new ones are created on demand
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            NestedTransaction transaction = new NestedTransaction();
            db.removeNodes(List.of(node), transaction);
            transaction.commit();
            return node;
        }
        if (node.state() == Node.State.ready) return node; // Idempotent
        // Refuse to ready a node whose (parent) host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if (!failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
        return setReady(nodeMutex, agent, reason);
    }
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    Node node = requireNode(hostname);
    return removeRecursively(node, false);
}
/** As above; force skips removability checks. */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        NestedTransaction transaction = new NestedTransaction();
        final List<Node> removed;
        if (!node.type().isHost()) {
            removed = List.of(node);
            db.removeNodes(removed, transaction);
        } else {
            removed = removeChildren(node, force, transaction);
            if (zone.cloud().dynamicProvisioning()) {
                // Dynamically provisioned hosts disappear completely
                db.removeNodes(List.of(node), transaction);
            } else {
                // Statically provisioned hosts are kept as deprovisioned so their history survives
                move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
            }
            removed.add(node);
        }
        transaction.commit();
        return removed;
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    if (node.status().wantToRebuild())
        throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
    NestedTransaction transaction = new NestedTransaction();
    db.removeNodes(List.of(node), transaction);
    transaction.commit();
}
/** Removes all children of the given host as part of the given transaction, and returns them (mutable copy). */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = list().childrenOf(node).asList();
    children.forEach(child -> requireRemovable(child, true, force));
    db.removeNodes(children, transaction);
    return new ArrayList<>(children); // Mutable so callers can append the host itself
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node:
 *   - non-recursively: node is unallocated
 *   - recursively: node is unallocated or node is in failed|parked
 * - Host node: iff in state provisioned|failed|parked
 * - Child node:
 *   - non-recursively: node in state ready
 *   - recursively: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param force when true, all checks are skipped
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
    if (force) return;
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        // Allocated tenant nodes may only be removed as part of a recursive host removal
        EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (!removingRecursively || !removableStates.contains(node.state()))
            illegal(node + " is currently allocated and cannot be removed while in " + node.state());
    }
    final Set<Node.State> removableStates;
    if (node.type().isHost()) {
        removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    } else {
        // Children are removable from more states when their host is being removed with them
        removableStates = removingRecursively
                ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                : EnumSet.of(Node.State.ready);
    }
    if (!removableStates.contains(node.state()))
        illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 * - Node is tenant host
 * - Node is in zone without dynamic provisioning
 * - Node is in parked or failed state
 *
 * @throws IllegalArgumentException if any of the conditions above does not hold
 */
private void requireBreakfixable(Node node) {
    if (zone.cloud().dynamicProvisioning()) {
        illegal("Can not breakfix in zone: " + zone);
    }
    if (node.type() != NodeType.host) {
        illegal(node + " can not be breakfixed as it is not a tenant host");
    }
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        // Fixed message: this check guards breakfixing, not removal
        illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
    return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
}
/**
 * Increases the restart generation of the any nodes matching given filter.
 * Note: calls allocation().get() unconditionally, so the filter is expected to
 * only match allocated nodes.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                   lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @param version the wanted OS version; empty clears the wanted version
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> write(node.withWantedOsVersion(version), lock));
}
/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, HostOperation.deprovision, agent, instant);
}
/** Rebuild given host */
public List<Node> rebuild(String hostname, boolean soft, Agent agent, Instant instant) {
return decommission(hostname, soft ? HostOperation.softRebuild : HostOperation.rebuild, agent, instant);
}
private List<Node> decommission(String hostname, HostOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
boolean wantToDeprovision = op == HostOperation.deprovision;
boolean wantToRebuild = op == HostOperation.rebuild || op == HostOperation.softRebuild;
boolean wantToRetire = op.needsRetirement();
List<Node> result = new ArrayList<>();
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
result.add(write(newHost, lock));
}
if (wantToRetire) {
List<Node> updatedNodes = performOn(list().childrenOf(host), (node, nodeLock) -> {
Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
return write(newNode, nodeLock);
});
result.addAll(updatedNodes);
}
return result;
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
// Partition the nodes by the lock that guards them: one bucket per owning application,
// plus one bucket for nodes with no application lock (see applicationIdForLock).
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : nodes) {
Optional<ApplicationId> applicationId = applicationIdForLock(node);
if (applicationId.isPresent())
allocatedNodes.put(applicationId.get(), node);
else
unallocatedNodes.add(node);
}
// Perform the operation while holding the unallocated lock
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
// Re-read each node under the lock; skip nodes deleted since the filter was evaluated.
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
// Perform the operation while holding the applicable application lock
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = applications.lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
/** Returns whether tenant nodes can currently be allocated to the given host, using this zone's provisioning mode. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.cloud().dynamicProvisioning());
}

/**
 * Returns whether tenant nodes can currently be allocated to the given host.
 * The host must be able to run tenant nodes, must not be (wanting to be) retired,
 * must not be suspended, and must be in a state that permits allocation under the
 * given provisioning mode.
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    boolean membershipRetired = host.allocation().map(allocation -> allocation.membership().retired()).orElse(false);
    if (membershipRetired) return false;
    if (suspended(host)) return false;
    // Dynamically provisioned zones may also place nodes on hosts that are not yet active.
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
/** Returns whether the orchestrator reports the given node as suspended; hostnames unknown to it count as not suspended. */
public boolean suspended(Node node) {
    HostName hostName = new HostName(node.hostname());
    try {
        return orchestrator.getNodeStatus(hostName).isSuspended();
    }
    catch (HostNameNotFoundException ignored) {
        // A node the orchestrator has never seen cannot be suspended.
        return false;
    }
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
* Returns the unallocated/application lock, and the node acquired under that lock.
* For tenant nodes the owning application decides which lock applies, so if the owner changes
* between the read and the lock acquisition, the acquisition is retried (up to four times)
* with the freshly read node.
*
* @return empty if the node no longer exists
* @throws IllegalStateException if a consistent node could not be fetched within the retry budget
*/
private Optional<NodeMutex> lockAndGet(Node node, Optional<Duration> timeout) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode, timeout);
try {
// Re-read the node under the lock; fall back to a state-agnostic lookup if it changed state.
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
// Non-tenant nodes always map to the same lock; tenant nodes are consistent only if the owner is unchanged.
if (node.type() != NodeType.tenant ||
Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
// Ownership of the lock is transferred to the returned NodeMutex; do not close it here.
lockToClose = null;
return Optional.of(nodeMutex);
}
// Owner changed: retry with the fresh node so the correct lock is taken next round.
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock, or empty if the node does not exist. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(String hostname, Duration timeout) {
return node(hostname).flatMap(node -> lockAndGet(node, Optional.of(timeout)));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) { return lockAndGet(node, Optional.empty()); }
/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(Node node, Duration timeout) { return lockAndGet(node, Optional.of(timeout)); }
/** Returns the unallocated/application lock and the node acquired under that lock; throws NoSuchNodeException if the node is gone. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock and the node acquired under that lock; throws NoSuchNodeException if no such node. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the unallocated/application lock and the node acquired under that lock; throws NoSuchNodeException if no such node. */
public NodeMutex lockAndGetRequired(String hostname, Duration timeout) {
return lockAndGet(hostname, timeout).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Acquires the application lock for nodes guarded by one, or the unallocated-nodes lock otherwise, optionally with a timeout. */
private Mutex lock(Node node, Optional<Duration> timeout) {
    Optional<ApplicationId> application = applicationIdForLock(node);
    if (application.isEmpty())
        return timeout.map(db::lockInactive).orElseGet(db::lockInactive);
    ApplicationId id = application.get();
    return timeout.isPresent() ? applications.lock(id, timeout.get())
                               : applications.lock(id);
}
/** Returns the node with the given hostname; throws NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the application ID that should be used for locking when modifying this node */
private static Optional<ApplicationId> applicationIdForLock(Node node) {
// Tenant nodes are guarded by their owner's lock (empty when unallocated);
// every infrastructure node type maps to a fixed infrastructure application.
return switch (node.type()) {
case tenant -> node.allocation().map(Allocation::owner);
case host -> Optional.of(InfrastructureApplication.TENANT_HOST.id());
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER.id());
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST.id());
case controller -> Optional.of(InfrastructureApplication.CONTROLLER.id());
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST.id());
case proxy -> Optional.of(InfrastructureApplication.PROXY.id());
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST.id());
};
}
/** Throws IllegalArgumentException with the given message; shared shorthand for rejecting invalid node operations. */
private static void illegal(String message) {
throw new IllegalArgumentException(message);
}
/**
 * Returns whether the given node should be parked, rather than fully deallocated, when deallocated by the given agent.
 * Never parks nodes that are already parked, deallocated by an operator, or tenant nodes slated for deprovisioning;
 * otherwise parks nodes which want to be deprovisioned or rebuilt, or whose retirement was requested by an operator.
 */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked || agent == Agent.operator) return false;
    if (node.type() == NodeType.tenant && node.status().wantToDeprovision()) return false;
    if (node.status().wantToDeprovision() || node.status().wantToRebuild()) return true;
    // Park only when the retirement request came from an operator.
    return node.status().wantToRetire()
           && node.history().event(History.Event.Type.wantToRetire)
                  .map(History.Event::agent)
                  .map(a -> a == Agent.operator)
                  .orElse(false);
}
/** The destructive host-level operations, and whether each requires the host and its children to be retired first. */
private enum HostOperation {
/** Host is deprovisioned and data is destroyed */
deprovision(true),
/** Host is deprovisioned, the same host is later re-provisioned and data is destroyed */
rebuild(true),
/** Host is stopped and re-bootstrapped, data is preserved */
softRebuild(false);
// Whether the operation must retire the host (and its children) before proceeding.
private final boolean needsRetirement;
HostOperation(boolean needsRetirement) {
this.needsRetirement = needsRetirement;
}
/** Returns whether this operation requires the host and its children to be retired */
public boolean needsRetirement() {
return needsRetirement;
}
}
} |
You are right, thanks! | public HttpResponse getLogs(String logServerUri, Optional<Instant> deployTime) {
HttpGet get = new HttpGet(logServerUri);
try {
return new ProxyResponse(httpClient.execute(get));
} catch (IOException e) {
if (deployTime.isPresent() && Instant.now().isAfter(deployTime.get().minus(Duration.ofMinutes(1))))
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write("".getBytes(StandardCharsets.UTF_8));
}
};
return HttpErrorResponse.internalServerError("Failed to get logs: " + Exceptions.toMessageString(e));
}
} | if (deployTime.isPresent() && Instant.now().isAfter(deployTime.get().minus(Duration.ofMinutes(1)))) | public HttpResponse getLogs(String logServerUri, Optional<Instant> deployTime) {
HttpGet get = new HttpGet(logServerUri);
try {
return new ProxyResponse(httpClient.execute(get));
} catch (IOException e) {
if (deployTime.isPresent() && Instant.now().isBefore(deployTime.get().plus(Duration.ofMinutes(1))))
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write("".getBytes(StandardCharsets.UTF_8));
}
};
return HttpErrorResponse.internalServerError("Failed to get logs: " + Exceptions.toMessageString(e));
}
} | class LogRetriever {
private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
} | class LogRetriever {
private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
} |
Perhaps use nanoTime instead that will only move forward. | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | long start = System.currentTimeMillis(); | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} |
Ideally although less important as this is pure logging. Logging itself most likely uses `System.currentMillis()` so this time will correlate perfectly with log timestamps. | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | long start = System.currentTimeMillis(); | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} |
Yes, but when you do System.currentTime() - start, you might get surprised :) | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | long start = System.currentTimeMillis(); | public void deconstruct() {
log.log(Level.INFO, "Deconstructing mbus server: " + server);
long start = System.currentTimeMillis();
server.close();
server.release();
sessionRef.getReference().close();
log.log(Level.INFO, String.format("Mbus server deconstruction completed in %.3f seconds",
(System.currentTimeMillis()-start)/1000D));
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} | class MbusServerProvider implements Provider<MbusServer> {
private static final Logger log = Logger.getLogger(MbusServerProvider.class.getName());
private final MbusServer server;
private final ReferencedResource<SharedIntermediateSession> sessionRef;
public MbusServerProvider(ComponentId id, SessionCache sessionCache, CurrentContainer currentContainer) {
ComponentId chainId = id.withoutNamespace();
sessionRef = sessionCache.retainIntermediate(createIntermediateSessionParams(true, chainId.stringValue()));
server = new MbusServer(currentContainer, sessionRef.getResource());
}
static IntermediateSessionParams createIntermediateSessionParams(boolean broadcastName, String name) {
IntermediateSessionParams intermediateParams = new IntermediateSessionParams();
intermediateParams.setBroadcastName(broadcastName);
intermediateParams.setName(name);
return intermediateParams;
}
public SharedIntermediateSession getSession() {
return sessionRef.getResource();
}
@Override
public MbusServer get() {
return server;
}
@Override
} |
Add a `get(ZoneId)` to `ZoneRegistry`/`ZoneList`? | private CloudName findCloud(JobType job) {
return zones.zones().all().among(job.zone()).zones().stream().findFirst().map(ZoneApi::getCloudName).orElse(null);
} | return zones.zones().all().among(job.zone()).zones().stream().findFirst().map(ZoneApi::getCloudName).orElse(null); | private CloudName findCloud(JobType job) {
return zones.zones().all().get(job.zone()).map(ZoneApi::getCloudName).orElse(null);
} | class DeploymentStatus {
private static <T> List<T> union(List<T> first, List<T> second) {
return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList());
}
private final Application application;
private final JobList allJobs;
private final VersionStatus versionStatus;
private final Version systemVersion;
private final Function<InstanceName, VersionCompatibility> versionCompatibility;
private final ZoneRegistry zones;
private final Instant now;
private final Map<JobId, StepStatus> jobSteps;
private final List<StepStatus> allSteps;
public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones, VersionStatus versionStatus,
Version systemVersion, Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) {
this.application = requireNonNull(application);
this.zones = zones;
this.versionStatus = requireNonNull(versionStatus);
this.systemVersion = requireNonNull(systemVersion);
this.versionCompatibility = versionCompatibility;
this.now = requireNonNull(now);
List<StepStatus> allSteps = new ArrayList<>();
Map<JobId, JobStatus> jobs = new HashMap<>();
this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs));
this.allSteps = Collections.unmodifiableList(allSteps);
this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList()));
}
// The system test job type for the cloud of the given dependent production job (dependent may be null).
private JobType systemTest(JobType dependent) {
return JobType.systemTest(zones, dependent == null ? null : findCloud(dependent));
}
// The staging test job type for the cloud of the given dependent production job (dependent may be null).
private JobType stagingTest(JobType dependent) {
return JobType.stagingTest(zones, dependent == null ? null : findCloud(dependent));
}
/** The application this deployment status concerns. */
public Application application() {
return application;
}
/** A filterable list of the status of all jobs for this application. */
public JobList jobs() {
return allJobs;
}
/** Whether any jobs both dependent on the dependency, and a dependency for the dependent, are failing. */
private boolean hasFailures(StepStatus dependency, StepStatus dependent) {
Set<StepStatus> dependents = new HashSet<>();
fillDependents(dependency, new HashSet<>(), dependents, dependent);
// The critical jobs are those of the steps found on a dependency path between the two.
Set<JobId> criticalJobs = dependents.stream().flatMap(step -> step.job().stream()).collect(toSet());
return ! allJobs.matching(job -> criticalJobs.contains(job.id()))
.failingHard()
.isEmpty();
}
// Depth-first walk collecting, into dependents, every step on a dependency path from current down to dependency.
// visited memoises steps already expanded; returns whether current (transitively) depends on dependency.
private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) {
if (visited.contains(current))
return dependents.contains(current);
if (dependency == current)
dependents.add(current);
else
for (StepStatus dep : current.dependencies)
if (fillDependents(dependency, visited, dependents, dep))
dependents.add(current);
visited.add(current);
return dependents.contains(current);
}
/** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone.. */
public boolean hasFailures(Predicate<RevisionId> revisionFilter) {
return ! allJobs.failingHard()
.matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision()))
.isEmpty();
}
/** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
public boolean hasFailures() {
return ! allJobs.failingHard().isEmpty();
}
/** All job statuses, by job type, for the given instance. */
public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
return allJobs.asList().stream()
.filter(job -> job.id().application().equals(application.id().instance(instance)))
.collect(CustomCollectors.toLinkedMap(job -> job.id().type(), Function.identity()));
}
/** Filterable job status lists for each instance of this application. */
public Map<ApplicationId, JobList> instanceJobs() {
return allJobs.groupingBy(job -> job.id().application());
}
/**
* The set of jobs that need to run for the changes of each instance of the application to be considered complete,
* and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
*/
public Map<JobId, List<Job>> jobsToRun() {
// No revisions means nothing can be deployed, hence nothing to run.
if (application.revisions().last().isEmpty()) return Map.of();
Map<InstanceName, Change> changes = new LinkedHashMap<>();
for (InstanceName instance : application.deploymentSpec().instanceNames())
changes.put(instance, application.require(instance).change());
Map<JobId, List<Job>> jobs = jobsToRun(changes);
// Add the eager test jobs for outstanding changes, but no production jobs for those changes.
Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>();
for (InstanceName instance : application.deploymentSpec().instanceNames()) {
Change outstanding = outstandingChange(instance);
if (outstanding.hasTargets())
outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change()));
}
var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream()
.filter(entry -> ! entry.getKey().type().isProduction());
// Merge, preserving insertion order, and de-duplicating runs for the same job with union.
return Stream.concat(jobs.entrySet().stream(), testJobs)
.collect(collectingAndThen(toMap(Map.Entry::getKey,
Map.Entry::getValue,
DeploymentStatus::union,
LinkedHashMap::new),
Collections::unmodifiableMap));
}
private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
if (application.revisions().last().isEmpty()) return Map.of();
Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>();
changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
Map<JobId, List<Job>> testJobs = testJobs(productionJobs);
// Test jobs go first so they are ordered ahead of the production jobs they verify.
Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs);
jobs.putAll(productionJobs);
// Add runs for declared tests not already covered by the production jobs computed above.
jobSteps.forEach((job, step) -> {
if ( ! step.isDeclared() || job.type().isProduction() || jobs.containsKey(job))
return;
Change change = changes.get(job.application().instance());
if (change == null || ! change.hasTargets())
return;
// Pick, per cloud, the first production deployment job which already has a deployment, to test against.
Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream()
.filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
.filter(jobId -> deploymentFor(jobId).isPresent())
.collect(groupingBy(jobId -> findCloud(jobId.type()),
Collectors.reducing((o, n) -> o)))
.values();
if (firstProductionJobsWithDeployment.isEmpty())
firstProductionJobsWithDeployment = List.of(Optional.empty());
for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) {
Versions versions = Versions.from(change,
application,
firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor),
fallbackPlatform(change, job));
if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) {
JobType actualType = job.type().isSystemTest() ? systemTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null))
: stagingTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null));
jobs.merge(job, List.of(new Job(actualType, versions, step.readyAt(change), change)), DeploymentStatus::union);
}
}
});
return Collections.unmodifiableMap(jobs);
}
/** Fall back to the newest, deployable platform, which is compatible with what we want to deploy. */
public Version fallbackPlatform(Change change, JobId job) {
Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion);
// Without a compile version, any platform is acceptable; use the system version.
if (compileVersion.isEmpty())
return systemVersion;
// Walk deployable versions from newest to oldest, and take the first compatible one.
for (VespaVersion version : reversed(versionStatus.deployableVersions()))
if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get()))
return version.versionNumber();
throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get());
}
/** Whether the given change is considered complete for the given instance. */
public boolean hasCompleted(InstanceName instance, Change change) {
// Instances without production steps are done once tests have verified something at least as new as the change.
if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) {
if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true;
if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true;
}
return jobsToRun(Map.of(instance, change), false).isEmpty();
}
/** The set of jobs that need to run for the given changes to be considered complete. */
private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) {
return jobsToRun(changes, false);
}
/** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */
public Map<JobId, StepStatus> jobSteps() { return jobSteps; }
/** The step status of each instance of this application, keyed by instance name. */
public Map<InstanceName, StepStatus> instanceSteps() {
ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
for (StepStatus status : allSteps)
if (status instanceof InstanceStatus)
instances.put(status.instance(), status);
return instances.build();
}
/** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */
public List<StepStatus> allSteps() {
return allSteps;
}
/** The deployment of the given job's zone in its instance, if present. */
public Optional<Deployment> deploymentFor(JobId job) {
return Optional.ofNullable(application.require(job.application().instance())
.deployments().get(job.type().zone()));
}
// The newest value (as extracted by runMapper) verified by successful system and staging test runs in every
// relevant test zone for the instance, or empty if some relevant test zone has no successful run.
private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) {
// The clouds of all production jobs determine which test zones are relevant.
Set<CloudName> clouds = jobSteps.keySet().stream()
.filter(job -> job.type().isProduction())
.map(job -> findCloud(job.type()))
.collect(toSet());
List<ZoneId> testZones = new ArrayList<>();
if (application.deploymentSpec().requireInstance(instance).concerns(test)) {
if (clouds.isEmpty()) testZones.add(JobType.systemTest(zones, null).zone());
else for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone());
}
if (application.deploymentSpec().requireInstance(instance).concerns(staging)) {
if (clouds.isEmpty()) testZones.add(JobType.stagingTest(zones, null).zone());
else for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone());
}
// The newest successfully tested value per test zone ...
Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance))
.type(systemTest(null), stagingTest(null))
.asList().stream().flatMap(jobs -> jobs.runs().values().stream())
.filter(Run::hasSucceeded)
.collect(groupingBy(run -> run.id().type().zone(),
mapping(runMapper, Collectors.maxBy(naturalOrder()))));
// ... is only conclusive if every required test zone has one; the answer is the oldest of those newest values.
return newestPerZone.keySet().containsAll(testZones)
? testZones.stream().map(newestPerZone::get)
.reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().compareTo(o.get()) < 0 ? n : o)
.orElse(Optional.empty())
: Optional.empty();
}
/**
* The change to a revision which all dependencies of the given instance has completed,
* which does not downgrade any deployments in the instance,
* which is not already rolling out to the instance, and
* which causes at least one job to run if deployed to the instance.
* For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest.
*/
public Change outstandingChange(InstanceName instance) {
StepStatus status = instanceSteps().get(instance);
if (status == null) return Change.empty();
DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance);
// NOTE(review): `next` is defined outside this view — presumably the "next" revision target policy; confirm.
boolean ascending = next == spec.revisionTarget();
int cumulativeRisk = 0;
int nextRisk = 0;
int skippedCumulativeRisk = 0;
Instant readySince = now;
Optional<RevisionId> newestRevision = application.productionDeployments()
.getOrDefault(instance, List.of()).stream()
.map(Deployment::revision).max(naturalOrder());
Change candidate = Change.empty();
for (ApplicationVersion version : application.revisions().deployable(ascending)) {
Change change = Change.of(version.id());
// Skip revisions which would downgrade, are older than what is already rolling out, or are already complete.
if ( newestRevision.isPresent() && change.downgrades(newestRevision.get())
|| ! application.require(instance).change().revision().map(change::upgrades).orElse(true)
|| hasCompleted(instance, change)) {
if (ascending) continue;
else return Change.empty();
}
// Risk of skipped revisions accumulates onto the next eligible candidate.
skippedCumulativeRisk += version.risk();
nextRisk = nextRisk > 0 ? nextRisk : version.risk();
Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty());
if (readyAt.map(now::isBefore).orElse(true)) continue;
cumulativeRisk += skippedCumulativeRisk;
skippedCumulativeRisk = 0;
nextRisk = 0;
// Stop accumulating once the risk budget is reached.
if (cumulativeRisk >= spec.maxRisk())
return candidate.equals(Change.empty()) ? change : candidate;
if (readyAt.get().isBefore(readySince)) readySince = readyAt.get();
candidate = change;
}
// Roll out the candidate when enough risk has accumulated, more would overflow the budget,
// the instance has never been triggered, or the candidate has been idle long enough.
return instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty())
|| cumulativeRisk >= spec.minRisk()
|| cumulativeRisk + nextRisk > spec.maxRisk()
|| ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours())))
? candidate : Change.empty();
}
/** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */
public Optional<Instant> verifiedAt(JobId job, Versions versions) {
    Optional<Instant> firstTrigger = allJobs.get(job)
                                            .flatMap(jobStatus -> jobStatus.runs().values().stream()
                                                                           .filter(run -> run.versions().equals(versions))
                                                                           .findFirst())
                                            .map(Run::start);
    Optional<Instant> systemDone = testedAt(job.application(), systemTest(null), versions);
    Optional<Instant> stagingDone = testedAt(job.application(), stagingTest(null), versions);
    if (systemDone.isEmpty() || stagingDone.isEmpty())
        return firstTrigger;
    // Both test types have succeeded: verification completed when the later of the two did.
    Optional<Instant> bothTested = systemDone.get().isAfter(stagingDone.get()) ? systemDone : stagingDone;
    if (firstTrigger.isPresent() && firstTrigger.get().isBefore(bothTested.get()))
        return firstTrigger;
    return bothTested;
}
/**
 * Earliest instant when the given versions were successfully tested with the given job type,
 * for the given instance. Only runs in the instance itself count when it declares the test;
 * otherwise runs in any instance count.
 */
private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) {
    return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance()))
                                       .orElse(allJobs)
                                       .type(type).asList().stream()
                                       .flatMap(status -> RunList.from(status)
                                                                 .on(versions)
                                                                 // Only runs in the same zone as the requested test type count.
                                                                 .matching(run -> run.id().type().zone().equals(type.zone()))
                                                                 .matching(Run::hasSucceeded)
                                                                 .asList().stream()
                                                                 .map(Run::start))
                                       .min(naturalOrder());
}
/**
 * The production jobs, and the changes to run through each, for the given instance to complete
 * the given change. When assumeUpgradesSucceed is set, jobs whose current change is assumed
 * already deployed are computed as if that change had landed (used for eager test planning).
 */
private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
    Map<JobId, List<Job>> jobs = new LinkedHashMap<>();
    jobSteps.forEach((job, step) -> {
        // Only production jobs of the given instance are considered.
        if ( ! job.application().instance().equals(instance) || ! job.type().isProduction())
            return;
        // Skip jobs which have already completed the change.
        if (step.completedAt(change, Optional.of(job)).isPresent())
            return;
        Optional<Deployment> deployment = deploymentFor(job);
        Optional<Version> existingPlatform = deployment.map(Deployment::version);
        Optional<RevisionId> existingRevision = deployment.map(Deployment::revision);
        // Whether applying only one part of the change would pair an incompatible platform and compile version.
        boolean deployingCompatibilityChange =    areIncompatible(existingPlatform, change.revision(), job)
                                               || areIncompatible(change.platform(), existingRevision, job);
        if (assumeUpgradesSucceed) {
            if (deployingCompatibilityChange)   // No eager tests for this; they'd run on the wrong platform.
                return;
            Change currentChange = application.require(instance).change();
            Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job));
            existingPlatform = Optional.of(target.targetPlatform());
            existingRevision = Optional.of(target.targetRevision());
        }
        List<Job> toRun = new ArrayList<>();
        // Compatibility changes must roll out in a single step; otherwise the change may be split in two.
        List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change);
        for (Change partial : changes) {
            Job jobToRun = new Job(job.type(),
                                   Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)),
                                   step.readyAt(partial, Optional.of(job)),
                                   partial);
            toRun.add(jobToRun);
            // Later partial changes build on the deployment state left by the earlier ones.
            existingPlatform = Optional.of(jobToRun.versions.targetPlatform());
            existingRevision = Optional.of(jobToRun.versions.targetRevision());
        }
        jobs.put(job, toRun);
    });
    return jobs;
}
/** Whether the given platform refuses the compile version of the given revision, for the job's instance. */
private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) {
    Optional<Version> compiledWith = revision.map(application.revisions()::get)
                                             .flatMap(ApplicationVersion::compileVersion);
    if (platform.isEmpty() || compiledWith.isEmpty())
        return false;
    return versionCompatibility.apply(job.application().instance())
                               .refuse(platform.get(), compiledWith.get());
}
/** Changes to deploy with the given job, possibly split in two steps (platform, then revision, or vice versa). */
private List<Change> changes(JobId job, StepStatus step, Change change) {
    // Only a combined, unpinned platform-and-revision change can be split.
    if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned())
        return List.of(change);
    // If one half is already complete for this step, deploy the whole thing at once.
    if (   step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent()
        || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent())
        return List.of(change);
    JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone()));
    UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout();
    if (job.type().isTest()) {
        // A production test mirrors whatever its deployment job has already rolled out.
        Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment));
        Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment));
        if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change);
        if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) {
            // The deployment job is ready for the full change: follow the rollout policy.
            if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment))
                        .map(ready -> ! now.isBefore(ready)).orElse(false)) {
                switch (rollout) {
                    case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? List.of(change) : List.of(change.withoutApplication(), change);
                    case leading: return List.of(change);
                    case simultaneous: return List.of(change.withoutPlatform(), change);
                }
            }
            return List.of(change.withoutApplication(), change);
        }
    }
    Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
    Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));
    // Neither half is ready on its own: the policy decides which part leads.
    if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
        switch (rollout) {
            case separate: return List.of(change.withoutApplication(), change);
            case leading: return List.of(change);
            case simultaneous: return List.of(change.withoutPlatform(), change);
        }
    }
    if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change);
    if (revisionReadyAt.isEmpty()) {
        return List.of(change.withoutApplication(), change);
    }
    boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
    boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
    // Whether upgrade-only tests are failing hard; if so, don't lead with the platform.
    boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
                                              .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion))
                                              .isEmpty();
    switch (rollout) {
        case separate:
            return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) // Platform was first, or already failing.
                   ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests
                     ? List.of(change)                                  // Platform already failing: deploy together.
                     : List.of(change.withoutApplication(), change)     // Platform leads, then the full change.
                   : revisionReadyFirst
                     ? List.of(change.withoutPlatform(), change)        // Revision leads, then the full change.
                     : List.of(change);                                 // Both ready at the same time: deploy together.
        case leading:
            return List.of(change);
        case simultaneous:
            return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change);
        default: throw new IllegalStateException("Unknown upgrade rollout policy");
    }
}
/** The test jobs that need to run prior to the given production deployment jobs. */
public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) {
    Map<JobId, List<Job>> testJobs = new LinkedHashMap<>();
    // First pass: add declared tests for each production deployment which lacks a success on its versions.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            if (job.type().isProduction() && job.type().isDeployment()) {
                declaredTest(job.application(), testType).ifPresent(testJob -> {
                    for (Job productionJob : versionsList)
                        if (allJobs.successOn(testType, productionJob.versions()).asList().isEmpty())
                            testJobs.merge(testJob, List.of(new Job(testJob.type(),
                                                                    productionJob.versions(),
                                                                    jobSteps().get(testJob).readyAt(productionJob.change),
                                                                    productionJob.change)),
                                           DeploymentStatus::union);
                });
            }
        }
    });
    // Second pass: add an implicit (or first declared) test where no test for these versions was added above.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            for (Job productionJob : versionsList)
                if (   job.type().isProduction() && job.type().isDeployment()
                    && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()
                    // No test of this type and zone already covers these versions.
                    && testJobs.keySet().stream()
                               .noneMatch(test ->    test.type().equals(testType) && test.type().zone().equals(testType.zone())
                                                  && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) {
                    JobId testJob = firstDeclaredOrElseImplicitTest(testType);
                    testJobs.merge(testJob,
                                   List.of(new Job(testJob.type(),
                                                   productionJob.versions(),
                                                   jobSteps.get(testJob).readyAt(productionJob.change),
                                                   productionJob.change)),
                                   DeploymentStatus::union);
                }
        }
    });
    return Collections.unmodifiableMap(testJobs);
}
/** The first declared test of the given type across instances, or the first implicit one if none is declared. */
private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
    return application.deploymentSpec().instanceNames().stream()
                      .map(instanceName -> new JobId(application.id().instance(instanceName), testJob))
                      .filter(jobSteps::containsKey)
                      .min(comparing(id -> jobSteps.get(id).isDeclared() ? 0 : 1))
                      .orElseThrow();
}
/** JobId of any declared test of the given type, for the given instance. */
private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
    JobId candidate = new JobId(instanceId, testJob);
    StepStatus step = jobSteps.get(candidate);
    if (step != null && step.isDeclared())
        return Optional.of(candidate);
    return Optional.empty();
}
/** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) {
    if (DeploymentSpec.empty.equals(spec))
        return Map.of();
    Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
    List<StepStatus> previous = List.of();
    // Top-level steps run serially; each depends on the primitives of the one before it.
    for (DeploymentSpec.Step step : spec.steps())
        previous = fillStep(dependencies, allSteps, step, previous, null, jobs,
                            instanceWithImplicitTest(test, spec),
                            instanceWithImplicitTest(staging, spec));
    return Collections.unmodifiableMap(dependencies);
}
/**
 * The first instance of the spec, which gets an implicit test for the given environment,
 * or null if any instance declares that environment itself.
 */
private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) {
    InstanceName candidate = null;
    for (DeploymentInstanceSpec instanceSpec : spec.instances()) {
        if (instanceSpec.concerns(environment))
            return null;   // Declared explicitly somewhere: no implicit test anywhere.
        if (candidate == null)
            candidate = instanceSpec.name();
    }
    return candidate;
}
/** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step,
                                  List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs,
                                  InstanceName implicitSystemTest, InstanceName implicitStagingTest) {
    // Leaf steps: delays, test zones, production tests, and production zones.
    if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) {
        if (instance == null)
            return previous;   // Leaf steps outside any instance are ignored.
        if ( ! step.delay().isZero()) {
            StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
            allSteps.add(stepStatus);
            return List.of(stepStatus);
        }
        JobType jobType;
        JobId jobId;
        StepStatus stepStatus;
        if (step.concerns(test) || step.concerns(staging)) {
            jobType = step.concerns(test) ? systemTest(null) : stagingTest(null);
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true);
            // Declared system/staging tests are appended to, not replacing, the dependencies of what follows.
            previous = new ArrayList<>(previous);
            previous.add(stepStatus);
        }
        else if (step.isTest()) {
            jobType = JobType.test(((DeclaredTest) step).region());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else if (step.concerns(prod)) {
            jobType = JobType.prod(((DeclaredZone) step).region().get());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else return previous;   // Not a job step: nothing to add.
        allSteps.add(stepStatus);
        dependencies.put(jobId, stepStatus);
        return previous;
    }
    // Instance steps: add the instance node, plus any implicit system/staging tests it should carry.
    if (step instanceof DeploymentInstanceSpec) {
        DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
        StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
        instance = spec.name();
        allSteps.add(instanceStatus);
        previous = List.of(instanceStatus);
        if (instance.equals(implicitSystemTest)) {
            JobId job = new JobId(application.id().instance(instance), systemTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(),
                                                                      this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
        if (instance.equals(implicitStagingTest)) {
            JobId job = new JobId(application.id().instance(instance), stagingTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(),
                                                                      this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
    }
    // Ordered (serial) composite: each nested step depends on the one before it.
    if (step.isOrdered()) {
        for (DeploymentSpec.Step nested : step.steps())
            previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest);
        return previous;
    }
    // Parallel composite: nested steps share dependencies; what follows depends on all of them.
    List<StepStatus> parallel = new ArrayList<>();
    for (DeploymentSpec.Step nested : step.steps())
        parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest));
    return List.copyOf(parallel);
}
/** The kind of node a {@link StepStatus} represents in the deployment dependency graph. */
public enum StepType {
    /** An instance — completion marks a change as ready for the jobs contained in it. */
    instance,
    /** A timed delay. */
    delay,
    /** A system, staging or production test. */
    test,
    /** A production deployment. */
    deployment,
}
/**
 * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
 *
 * Each node contains a step describing the node,
 * a list of steps which need to be complete before the step may start,
 * a list of jobs from which completion of the step is computed, and
 * optionally, an instance name used to identify a job type for the step,
 *
 * The completion criterion for each type of step is implemented in subclasses of this.
 */
public static abstract class StepStatus {

    private final StepType type;
    private final DeploymentSpec.Step step;
    private final List<StepStatus> dependencies;   // All dependencies must complete before this step can start.
    private final InstanceName instance;

    private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
        this.type = requireNonNull(type);
        this.step = requireNonNull(step);
        this.dependencies = List.copyOf(dependencies);
        this.instance = instance;
    }

    /** The type of step this is. */
    public final StepType type() { return type; }

    /** The step defining this. */
    public final DeploymentSpec.Step step() { return step; }

    /** The list of steps that need to be complete before this may start. */
    public final List<StepStatus> dependencies() { return dependencies; }

    /** The instance of this. */
    public final InstanceName instance() { return instance; }

    /** The id of the job this corresponds to, if any. */
    public Optional<JobId> job() { return Optional.empty(); }

    /** The time at which this is, or was, complete on the given change and / or versions. */
    public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

    /** The time at which this is, or was, complete on the given change and / or versions, as seen from the given dependent job. */
    abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

    /** The time at which this step is ready to run the specified change and / or versions. */
    public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

    /** The time at which this step is ready to run the specified change and / or versions: dependency completion, delayed by any block, pause, or cool-down. */
    Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
        return dependenciesCompletedAt(change, dependent)
                .map(ready -> Stream.of(blockedUntil(change),
                                        pausedUntil(),
                                        coolingDownUntil(change))
                                    .flatMap(Optional::stream)
                                    .reduce(ready, maxBy(naturalOrder())));
    }

    /** The time at which all dependencies completed on the given change and / or versions, or empty if any has not. */
    Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
        Instant latest = Instant.EPOCH;
        for (StepStatus step : dependencies) {
            Optional<Instant> completedAt = step.completedAt(change, dependent);
            if (completedAt.isEmpty()) return Optional.empty();
            latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest;
        }
        return Optional.of(latest);
    }

    /** The time until which this step is blocked by a change blocker. */
    public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

    /** The time until which this step is paused by user intervention. */
    public Optional<Instant> pausedUntil() { return Optional.empty(); }

    /** The time until which this step is cooling down, due to consecutive failures. */
    public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

    /** Whether this step is declared in the deployment spec, or is an implicit step. */
    public boolean isDeclared() { return true; }

}
/** A timed delay step: complete when its delay has passed, counting from when it became ready. */
private static class DelayStatus extends StepStatus {

    private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
        super(StepType.delay, step, dependencies, instance);
    }

    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        // Complete once the configured delay has elapsed after this step became ready.
        return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
    }

}
/** Step status for an instance: gates the jobs it contains on its change and change blockers. */
private static class InstanceStatus extends StepStatus {

    private final DeploymentInstanceSpec spec;
    private final Instant now;
    private final Instance instance;
    private final DeploymentStatus status;

    private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                           Instance instance, DeploymentStatus status) {
        super(StepType.instance, spec, dependencies, spec.name());
        this.spec = spec;
        this.now = now;
        this.instance = instance;
        this.status = status;
    }

    /** The time at which this step is ready to run the specified change and / or versions: the earliest readiness among this instance's production jobs. */
    @Override
    public Optional<Instant> readyAt(Change change) {
        return status.jobSteps.keySet().stream()
                              .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name()))
                              .map(job -> super.readyAt(change, Optional.of(job)))
                              .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().isBefore(o.get()) ? n : o)
                              .orElseGet(() -> super.readyAt(change, Optional.empty()));
    }

    /**
     * Time of completion of its dependencies, if all parts of the given change are contained in the change
     * for this instance, or if no more jobs should run for this instance for the given change.
     */
    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        return (    (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                 && (change.revision().isEmpty() || change.revision().equals(instance.change().revision()))
            || step().steps().stream().noneMatch(step -> step.concerns(prod)))
               ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets()))
               : Optional.empty();
    }

    @Override
    public Optional<Instant> blockedUntil(Change change) {
        // Scan hour by hour, up to a week ahead, for the first instant not covered by any relevant change blocker.
        for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
            boolean blocked = false;
            for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                while (    blocker.window().includes(current)
                        && now.plus(Duration.ofDays(7)).isAfter(current)
                        && (    change.platform().isPresent() && blocker.blocksVersions()
                             || change.revision().isPresent() && blocker.blocksRevisions())) {
                    blocked = true;
                    current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                }
            }
            if ( ! blocked)
                return current == now ? Optional.empty() : Optional.of(current);
        }
        // Blocked throughout the scanned week: report a far-future instant.
        return Optional.of(now.plusSeconds(1 << 30));
    }

}
/** Step status backed by a concrete job; provides pause and cool-down timing, and factories per job kind. */
private static abstract class JobStepStatus extends StepStatus {

    private final JobStatus job;
    private final DeploymentStatus status;

    private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                          DeploymentStatus status) {
        super(type, step, dependencies, job.id().application().instance());
        this.job = requireNonNull(job);
        this.status = requireNonNull(status);
    }

    @Override
    public Optional<JobId> job() { return Optional.of(job.id()); }

    @Override
    public Optional<Instant> pausedUntil() {
        return status.application().require(job.id().application().instance()).jobPause(job.id().type());
    }

    /** Exponential-ish back-off after failures: half the failure duration, plus ten minutes, after the last completion. */
    @Override
    public Optional<Instant> coolingDownUntil(Change change) {
        if (job.lastTriggered().isEmpty()) return Optional.empty();
        if (job.lastCompleted().isEmpty()) return Optional.empty();
        if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty();
        Versions lastVersions = job.lastCompleted().get().versions();
        // Only failures on the same target versions as the change cause a cool-down.
        if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
        if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty();
        // Node allocation failures in test environments are retried without delay.
        if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty();
        Instant firstFailing = job.firstFailing().get().end().get();
        Instant lastCompleted = job.lastCompleted().get().end().get();
        return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                  : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                             .plus(Duration.between(firstFailing, lastCompleted)
                                                                                           .dividedBy(2)))
                                                            .filter(status.now::isBefore);
    }

    /** Step status for a production deployment job: ready when dependencies are done and its versions are verified by tests. */
    private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                        DeploymentStatus status, JobStatus job) {
        ZoneId zone = ZoneId.from(step.environment(), step.region().get());
        Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance())
                                                                            .deployments().get(zone));
        return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
            @Override
            public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id())));
                if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt;
            }

            /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                if (    change.isPinned()
                     && change.platform().isPresent()
                     && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                    return Optional.empty();
                if (    change.revision().isPresent()
                     && ! existingDeployment.map(Deployment::revision).equals(change.revision())
                     && dependent.equals(job()))
                    return Optional.empty();
                Change fullChange = status.application().require(job.id().application().instance()).change();
                // A deployment ahead of the change, which the full change would downgrade, counts as complete.
                if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.revision()))
                                                         && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision())))
                                      .orElse(false))
                    return job.lastCompleted().flatMap(Run::end);
                // Otherwise: the end of the latest successful run on matching targets, unless a newer, mismatched run intervenes for this dependent.
                Optional<Instant> end = Optional.empty();
                for (Run run : job.runs().descendingMap().values()) {
                    if (run.versions().targetsMatch(change)) {
                        if (run.hasSucceeded()) end = run.end();
                    }
                    else if (dependent.equals(job()))
                        break;
                }
                return end;
            }
        };
    }

    /** Step status for a production test job: tied to the completion of its corresponding deployment job. */
    private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job) {
        JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone()));
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt;
            }

            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                // For itself as dependent: only the last run, started no earlier than the deployment, counts.
                return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! run.start().isBefore(at)).orElse(false)).stream()
                                                : job.runs().values().stream())
                        .filter(Run::hasSucceeded)
                        .filter(run -> run.versions().targetsMatch(change))
                        .flatMap(run -> run.end().stream()).findFirst();
            }
        };
    }

    /** Step status for a system or staging test job, declared or implicit. */
    private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job, boolean declared) {
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                // Latest end of a successful run whose versions match the change (and the dependent's deployment and cloud, when known).
                return RunList.from(job)
                              .matching(run -> dependent.flatMap(status::deploymentFor)
                                                        .map(deployment -> run.versions().targetsMatch(Versions.from(change,
                                                                                                                     status.application,
                                                                                                                     Optional.of(deployment),
                                                                                                                     status.fallbackPlatform(change, dependent.get()))))
                                                        .orElseGet(() ->    (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform()))
                                                                         && (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision()))))
                              .matching(Run::hasSucceeded)
                              .matching(run -> dependent.isEmpty() || status.findCloud(dependent.get().type()).equals(status.findCloud(run.id().type())))
                              .asList().stream()
                              .map(run -> run.end().get())
                              .max(naturalOrder());
            }

            @Override
            public boolean isDeclared() { return declared; }
        };
    }

}
/** A job run to schedule: the job type, the versions to deploy, when it may start, and the change it belongs to. */
public static class Job {

    private final JobType type;
    private final Versions versions;               // Read directly by the enclosing class.
    private final Optional<Instant> readyAt;
    private final Change change;                   // Read directly by the enclosing class.

    public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) {
        this.type = type;
        // System tests never need source versions.
        this.versions = type.isSystemTest() ? versions.withoutSources() : versions;
        this.readyAt = readyAt;
        this.change = change;
    }

    public JobType type() { return type; }

    public Versions versions() { return versions; }

    public Optional<Instant> readyAt() { return readyAt; }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if (o == null || o.getClass() != getClass()) return false;
        Job other = (Job) o;
        // Job types compare by zone, so equal jobs may differ in cloud-specific details.
        return    type.zone().equals(other.type.zone())
               && versions.equals(other.versions)
               && readyAt.equals(other.readyAt)
               && change.equals(other.change);
    }

    @Override
    public int hashCode() {
        return Objects.hash(type.zone(), versions, readyAt, change);
    }

    @Override
    public String toString() {
        return change + " with versions " + versions + ", ready at " + readyAt;
    }

}
} | class DeploymentStatus {
/** The concatenation of the two lists, with duplicates removed, as an unmodifiable list. */
private static <T> List<T> union(List<T> first, List<T> second) {
    return Stream.of(first, second)
                 .flatMap(List::stream)
                 .distinct()
                 .collect(toUnmodifiableList());
}
private final Application application;                                               // The application whose status this describes.
private final JobList allJobs;                                                       // Status of all jobs in the dependency graph.
private final VersionStatus versionStatus;
private final Version systemVersion;
private final Function<InstanceName, VersionCompatibility> versionCompatibility;     // Per-instance platform/compile compatibility rules.
private final ZoneRegistry zones;
private final Instant now;                                                           // Fixed snapshot time for all readiness computations.
private final Map<JobId, StepStatus> jobSteps;                                       // The job dependency DAG, in declaration order.
private final List<StepStatus> allSteps;                                             // All steps, including non-job steps such as delays.
/**
 * Creates a status snapshot for the given application, resolving job statuses through the given function.
 * All time-dependent computations use the given, fixed instant.
 */
public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones, VersionStatus versionStatus,
                        Version systemVersion, Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) {
    this.application = requireNonNull(application);
    this.zones = zones;
    this.versionStatus = requireNonNull(versionStatus);
    this.systemVersion = requireNonNull(systemVersion);
    this.versionCompatibility = versionCompatibility;
    this.now = requireNonNull(now);
    List<StepStatus> allSteps = new ArrayList<>();
    // Memoise job status lookups while building the dependency graph.
    Map<JobId, JobStatus> jobs = new HashMap<>();
    this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs));
    this.allSteps = Collections.unmodifiableList(allSteps);
    // NOTE(review): this maps through the raw function again rather than the memoised `jobs` map — verify `allJobs` is pure/cheap.
    this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList()));
}
/** The system test job type, in the cloud of the given dependent job, or cloud-agnostic when dependent is null. */
private JobType systemTest(JobType dependent) {
    CloudName cloud = dependent == null ? null : findCloud(dependent);
    return JobType.systemTest(zones, cloud);
}
/** The staging test job type, in the cloud of the given dependent job, or cloud-agnostic when dependent is null. */
private JobType stagingTest(JobType dependent) {
    CloudName cloud = dependent == null ? null : findCloud(dependent);
    return JobType.stagingTest(zones, cloud);
}
/** The application this deployment status concerns. */
public Application application() {
    return this.application;
}
/** A filterable list of the status of all jobs for this application. */
public JobList jobs() {
    return this.allJobs;
}
/** Whether any jobs both dependent on the dependency, and a dependency of the dependent, are failing. */
private boolean hasFailures(StepStatus dependency, StepStatus dependent) {
    Set<StepStatus> between = new HashSet<>();
    fillDependents(dependency, new HashSet<>(), between, dependent);
    Set<JobId> criticalIds = new HashSet<>();
    for (StepStatus step : between)
        step.job().ifPresent(criticalIds::add);
    return ! allJobs.matching(job -> criticalIds.contains(job.id()))
                    .failingHard()
                    .isEmpty();
}
/**
 * Recursively collects, into dependents, every step on a dependency path from current back to dependency.
 * Returns whether current itself lies on such a path. Note: steps are compared by identity, not equals.
 */
private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) {
    if (visited.contains(current))
        return dependents.contains(current);   // Already resolved: reuse the cached answer.
    if (dependency == current)                 // Identity comparison is intentional: graph nodes are unique objects.
        dependents.add(current);
    else
        for (StepStatus dep : current.dependencies)
            if (fillDependents(dependency, visited, dependents, dep))
                dependents.add(current);
    visited.add(current);
    return dependents.contains(current);
}
/** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone. */
public boolean hasFailures(Predicate<RevisionId> revisionFilter) {
    return ! allJobs.failingHard()
                    // NOTE(review): assumes every hard-failing job has a last triggered run — verify failingHard() guarantees this.
                    .matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision()))
                    .isEmpty();
}
/** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
public boolean hasFailures() {
    JobList failing = allJobs.failingHard();
    return ! failing.isEmpty();
}
/** All job statuses, by job type, for the given instance. */
public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
    ApplicationId instanceId = application.id().instance(instance);
    return allJobs.asList().stream()
                  .filter(job -> job.id().application().equals(instanceId))
                  .collect(CustomCollectors.toLinkedMap(job -> job.id().type(), job -> job));
}
/** Filterable job status lists for each instance of this application. */
public Map<ApplicationId, JobList> instanceJobs() {
    return allJobs.groupingBy(jobStatus -> jobStatus.id().application());
}
/**
 * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
 * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
 */
public Map<JobId, List<Job>> jobsToRun() {
    if (application.revisions().last().isEmpty()) return Map.of(); // Nothing submitted yet — nothing to run.
    // Jobs required to complete the changes currently rolling out to each instance.
    Map<InstanceName, Change> changes = new LinkedHashMap<>();
    for (InstanceName instance : application.deploymentSpec().instanceNames())
        changes.put(instance, application.require(instance).change());
    Map<JobId, List<Job>> jobs = jobsToRun(changes);
    // Outstanding changes are not yet rolling out, but their tests can usefully run ahead of time.
    Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>();
    for (InstanceName instance : application.deploymentSpec().instanceNames()) {
        Change outstanding = outstandingChange(instance);
        if (outstanding.hasTargets())
            outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change()));
    }
    // Only the test jobs of the outstanding changes are included — production jobs must wait.
    var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream()
                                                      .filter(entry -> ! entry.getKey().type().isProduction());
    // Merge, with current-change jobs first; union combines job lists for jobs present in both.
    return Stream.concat(jobs.entrySet().stream(), testJobs)
                 .collect(collectingAndThen(toMap(Map.Entry::getKey,
                                                  Map.Entry::getValue,
                                                  DeploymentStatus::union,
                                                  LinkedHashMap::new),
                                            Collections::unmodifiableMap));
}
/**
 * The jobs, by job id, that must run for the given changes per instance to be complete.
 *
 * @param changes     the change each instance should converge on
 * @param eagerTests  when true, assume in-flight upgrades succeed, so tests for a coming change
 *                    can be computed ahead of the change actually rolling out
 */
private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
    if (application.revisions().last().isEmpty()) return Map.of(); // Nothing submitted yet.
    Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>();
    changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
    Map<JobId, List<Job>> testJobs = testJobs(productionJobs);
    // Test jobs go first, then production jobs, preserving insertion order.
    Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs);
    jobs.putAll(productionJobs);
    // Add declared test jobs which have no production jobs to verify in this pass.
    jobSteps.forEach((job, step) -> {
        if ( ! step.isDeclared() || job.type().isProduction() || jobs.containsKey(job))
            return;
        Change change = changes.get(job.application().instance());
        if (change == null || ! change.hasTargets())
            return;
        // For each cloud with a production deployment, pick the first such job as the versions source.
        Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream()
                                                                                .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                                .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                                .collect(groupingBy(jobId -> findCloud(jobId.type()),
                                                                                                    Collectors.reducing((o, n) -> o))) // keep first per cloud
                                                                                .values();
        if (firstProductionJobsWithDeployment.isEmpty())
            firstProductionJobsWithDeployment = List.of(Optional.empty()); // No deployments: run tests against no existing deployment.
        for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) {
            Versions versions = Versions.from(change,
                                              application,
                                              firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor),
                                              fallbackPlatform(change, job));
            if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) {
                // Resolve the cloud-specific test type from the production job's cloud, if any.
                JobType actualType = job.type().isSystemTest() ? systemTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null))
                                                               : stagingTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null));
                jobs.merge(job, List.of(new Job(actualType, versions, step.readyAt(change), change)), DeploymentStatus::union);
            }
        }
    });
    return Collections.unmodifiableMap(jobs);
}
/**
 * Fall back to the newest, deployable platform which is compatible with what we want to deploy.
 * Returns the system version when the change has no compile version to be compatible with.
 *
 * @throws IllegalArgumentException if no deployable version is compatible with the compile version
 */
public Version fallbackPlatform(Change change, JobId job) {
    Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion);
    if (compileVersion.isEmpty())
        return systemVersion;
    // Iterate newest-first so the newest compatible deployable version wins.
    for (VespaVersion version : reversed(versionStatus.deployableVersions()))
        if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get()))
            return version.versionNumber();
    throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get());
}
/** Whether the given change has completed rolling out to the given instance. */
public boolean hasCompleted(InstanceName instance, Change change) {
    // Instances without production steps are complete once something newer has been tested.
    if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) {
        if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true;
        if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true;
    }
    return jobsToRun(Map.of(instance, change), false).isEmpty();
}
/** The set of jobs that need to run for the given changes to be considered complete; no eager test computation. */
private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) {
    return jobsToRun(changes, false);
}
/** The step status for all steps in the deployment spec of this which are jobs, in the same order as in the deployment spec. */
public Map<JobId, StepStatus> jobSteps() { return jobSteps; }
/** The step status of each instance step, keyed by instance name, in declaration order. */
public Map<InstanceName, StepStatus> instanceSteps() {
    ImmutableMap.Builder<InstanceName, StepStatus> builder = ImmutableMap.builder();
    for (StepStatus step : allSteps) {
        if (step instanceof InstanceStatus)
            builder.put(step.instance(), step);
    }
    return builder.build();
}
/** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */
public List<StepStatus> allSteps() {
    return allSteps;
}
/** The existing deployment in the zone of the given job, if any. */
public Optional<Deployment> deploymentFor(JobId job) {
    Deployment deployment = application.require(job.application().instance())
                                       .deployments().get(job.type().zone());
    return Optional.ofNullable(deployment);
}
/**
 * The newest value (platform or revision, extracted by {@code runMapper}) which has succeeded
 * in every relevant test zone for the given instance, or empty if any required test zone
 * has no successful run.
 */
private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) {
    // Clouds of all production jobs decide which cloud-specific test zones are required.
    Set<CloudName> clouds = jobSteps.keySet().stream()
                                    .filter(job -> job.type().isProduction())
                                    .map(job -> findCloud(job.type()))
                                    .collect(toSet());
    List<ZoneId> testZones = new ArrayList<>();
    if (application.deploymentSpec().requireInstance(instance).concerns(test)) {
        if (clouds.isEmpty()) testZones.add(JobType.systemTest(zones, null).zone());
        else for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone());
    }
    if (application.deploymentSpec().requireInstance(instance).concerns(staging)) {
        if (clouds.isEmpty()) testZones.add(JobType.stagingTest(zones, null).zone());
        else for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone());
    }
    // Newest successfully tested value per test zone.
    Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance))
                                                           .type(systemTest(null), stagingTest(null))
                                                           .asList().stream().flatMap(jobs -> jobs.runs().values().stream())
                                                           .filter(Run::hasSucceeded)
                                                           .collect(groupingBy(run -> run.id().type().zone(),
                                                                               mapping(runMapper, Collectors.maxBy(naturalOrder()))));
    // All required zones must be covered; the answer is the oldest of the per-zone newest values.
    return newestPerZone.keySet().containsAll(testZones)
           ? testZones.stream().map(newestPerZone::get)
                      .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().compareTo(o.get()) < 0 ? n : o)
                      .orElse(Optional.empty())
           : Optional.empty();
}
/**
 * The change to a revision which all dependencies of the given instance has completed,
 * which does not downgrade any deployments in the instance,
 * which is not already rolling out to the instance, and
 * which causes at least one job to run if deployed to the instance.
 * For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest.
 */
public Change outstandingChange(InstanceName instance) {
    StepStatus status = instanceSteps().get(instance);
    if (status == null) return Change.empty();
    DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance);
    boolean ascending = next == spec.revisionTarget();
    int cumulativeRisk = 0;            // risk of candidate and everything skipped before it became ready
    int nextRisk = 0;                  // risk of the first revision after the current candidate
    int skippedCumulativeRisk = 0;     // risk accumulated since the last ready revision
    Instant readySince = now;          // earliest readiness among accepted candidates
    Optional<RevisionId> newestRevision = application.productionDeployments()
                                                     .getOrDefault(instance, List.of()).stream()
                                                     .map(Deployment::revision).max(naturalOrder());
    Change candidate = Change.empty();
    for (ApplicationVersion version : application.revisions().deployable(ascending)) {
        Change change = Change.of(version.id());
        // Skip revisions which would downgrade, are already rolling out, or are already complete.
        if ( newestRevision.isPresent() && change.downgrades(newestRevision.get())
             || ! application.require(instance).change().revision().map(change::upgrades).orElse(true)
             || hasCompleted(instance, change)) {
            if (ascending) continue;       // when ascending, keep looking at newer revisions
            else return Change.empty();    // when descending, nothing older can apply either
        }
        skippedCumulativeRisk += version.risk();
        nextRisk = nextRisk > 0 ? nextRisk : version.risk();
        Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty());
        if (readyAt.map(now::isBefore).orElse(true)) continue; // not (yet) ready — risk stays "skipped"
        cumulativeRisk += skippedCumulativeRisk;
        skippedCumulativeRisk = 0;
        nextRisk = 0;
        if (cumulativeRisk >= spec.maxRisk())
            return candidate.equals(Change.empty()) ? change : candidate; // risk budget reached: roll out now
        if (readyAt.get().isBefore(readySince)) readySince = readyAt.get();
        candidate = change;
    }
    // Roll out the candidate if the instance is untouched, enough risk has accrued, the next revision
    // would overflow the budget, or the candidate has been idle for too long.
    return instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty())
           || cumulativeRisk >= spec.minRisk()
           || cumulativeRisk + nextRisk > spec.maxRisk()
           || ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours())))
           ? candidate : Change.empty();
}
/** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */
public Optional<Instant> verifiedAt(JobId job, Versions versions) {
    Optional<Instant> triggeredAt = allJobs.get(job)
                                           .flatMap(status -> status.runs().values().stream()
                                                                    .filter(run -> run.versions().equals(versions))
                                                                    .findFirst())
                                           .map(Run::start);
    Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(null), versions);
    Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(null), versions);
    if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt;
    // Verification completes when the later of the two test types succeeded.
    Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt;
    return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt;
}
/** Earliest instant when the given versions succeeded in the given test type, for the given instance. */
private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) {
    // Prefer a test declared by this instance; otherwise accept successful runs from any instance.
    return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance()))
                                       .orElse(allJobs)
                                       .type(type).asList().stream()
                                       .flatMap(status -> RunList.from(status)
                                                                 .on(versions)
                                                                 .matching(run -> run.id().type().zone().equals(type.zone()))
                                                                 .matching(Run::hasSucceeded)
                                                                 .asList().stream()
                                                                 .map(Run::start))
                                       .min(naturalOrder());
}
/**
 * The production jobs, with the changes (possibly split into partial steps) each must run,
 * for the given instance to complete the given change.
 *
 * @param assumeUpgradesSucceed pretend the currently rolling change has landed, so jobs for a
 *                              coming change can be computed eagerly; compatibility-fixing
 *                              deployments are then excluded
 */
private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
    Map<JobId, List<Job>> jobs = new LinkedHashMap<>();
    jobSteps.forEach((job, step) -> {
        if ( ! job.application().instance().equals(instance) || ! job.type().isProduction())
            return;
        if (step.completedAt(change, Optional.of(job)).isPresent())
            return; // Nothing to do for this job.
        Optional<Deployment> deployment = deploymentFor(job);
        Optional<Version> existingPlatform = deployment.map(Deployment::version);
        Optional<RevisionId> existingRevision = deployment.map(Deployment::revision);
        // Whether this deployment must bridge a platform/compile-version incompatibility.
        boolean deployingCompatibilityChange = areIncompatible(existingPlatform, change.revision(), job)
                                               || areIncompatible(change.platform(), existingRevision, job);
        if (assumeUpgradesSucceed) {
            if (deployingCompatibilityChange) // These must always be deployed in real time — skip when assuming.
                return;
            Change currentChange = application.require(instance).change();
            Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job));
            existingPlatform = Optional.of(target.targetPlatform());
            existingRevision = Optional.of(target.targetRevision());
        }
        List<Job> toRun = new ArrayList<>();
        // A compatibility change must go in one step; otherwise the change may be split per policy.
        List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change);
        for (Change partial : changes) {
            Job jobToRun = new Job(job.type(),
                                   Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)),
                                   step.readyAt(partial, Optional.of(job)),
                                   partial);
            toRun.add(jobToRun);
            // Assume the job succeeds, so the next partial change builds on its result.
            existingPlatform = Optional.of(jobToRun.versions.targetPlatform());
            existingRevision = Optional.of(jobToRun.versions.targetRevision());
        }
        jobs.put(job, toRun);
    });
    return jobs;
}
/** Whether the given platform and the compile version of the given revision are incompatible for the job's instance. */
private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) {
    Optional<Version> compiled = revision.map(application.revisions()::get)
                                         .flatMap(ApplicationVersion::compileVersion);
    if (platform.isEmpty() || compiled.isEmpty())
        return false;
    return versionCompatibility.apply(job.application().instance()).refuse(platform.get(), compiled.get());
}
/**
 * Changes to deploy with the given job, possibly split in two steps, depending on the
 * instance's upgrade rollout policy and which parts of the change are ready or already deployed.
 */
private List<Change> changes(JobId job, StepStatus step, Change change) {
    // Only platform+revision changes can be split; pinned changes always go together.
    if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned())
        return List.of(change);
    // If either half is already complete for this job, deploy the full change in one go.
    if ( step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent()
         || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent())
        return List.of(change);
    JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone()));
    UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout();
    if (job.type().isTest()) {
        // A production test must mirror what its corresponding deployment has done or will do.
        Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment));
        Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment));
        if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change);
        if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) {
            if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment))
                        .map(ready -> ! now.isBefore(ready)).orElse(false)) {
                switch (rollout) {
                    case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? List.of(change) : List.of(change.withoutApplication(), change);
                    case leading: return List.of(change);
                    case simultaneous: return List.of(change.withoutPlatform(), change);
                }
            }
            return List.of(change.withoutApplication(), change);
        }
    }
    Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
    Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));
    // Neither half ready on its own: order (or merge) according to policy.
    if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
        switch (rollout) {
            case separate: return List.of(change.withoutApplication(), change);
            case leading: return List.of(change);
            case simultaneous: return List.of(change.withoutPlatform(), change);
        }
    }
    if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change);
    if (revisionReadyAt.isEmpty()) {
        return List.of(change.withoutApplication(), change);
    }
    // Both halves ready: the one that became ready first leads, per policy.
    boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
    boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
    boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
                                             .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion))
                                             .isEmpty();
    switch (rollout) {
        case separate:
            return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) // EPOCH means no dependencies at all
                   ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests
                     ? List.of(change) // merge to allow a failing upgrade to pass with the revision
                     : List.of(change.withoutApplication(), change)
                   : revisionReadyFirst
                     ? List.of(change.withoutPlatform(), change)
                     : List.of(change);
        case leading:
            return List.of(change);
        case simultaneous:
            return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change);
        default: throw new IllegalStateException("Unknown upgrade rollout policy");
    }
}
/** The test jobs that need to run prior to the given production deployment jobs, with the versions they must verify. */
public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) {
    Map<JobId, List<Job>> testJobs = new LinkedHashMap<>();
    // First pass: tests declared by the same instance as the production job.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            if (job.type().isProduction() && job.type().isDeployment()) {
                declaredTest(job.application(), testType).ifPresent(testJob -> {
                    for (Job productionJob : versionsList)
                        if (allJobs.successOn(testType, productionJob.versions()).asList().isEmpty())
                            testJobs.merge(testJob, List.of(new Job(testJob.type(),
                                                                    productionJob.versions(),
                                                                    jobSteps().get(testJob).readyAt(productionJob.change),
                                                                    productionJob.change)),
                                           DeploymentStatus::union);
                });
            }
        }
    });
    // Second pass: versions still untested get a declared-or-implicit test from any instance.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            for (Job productionJob : versionsList)
                if ( job.type().isProduction() && job.type().isDeployment()
                     && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()
                     && testJobs.keySet().stream()
                                .noneMatch(test -> test.type().equals(testType) && test.type().zone().equals(testType.zone())
                                                   && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) {
                    JobId testJob = firstDeclaredOrElseImplicitTest(testType);
                    testJobs.merge(testJob,
                                   List.of(new Job(testJob.type(),
                                                   productionJob.versions(),
                                                   jobSteps.get(testJob).readyAt(productionJob.change),
                                                   productionJob.change)),
                                   DeploymentStatus::union);
                }
        }
    });
    return Collections.unmodifiableMap(testJobs);
}
/**
 * The first instance (in declaration order) with a declared test of the given type, or failing that,
 * the first with an implicit one. min on a boolean key keeps the first minimal element in encounter order.
 */
private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
    return application.deploymentSpec().instanceNames().stream()
                      .map(name -> new JobId(application.id().instance(name), testJob))
                      .filter(jobSteps::containsKey)
                      .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
}
/** JobId of any declared test of the given type, for the given instance, or empty if the test is absent or implicit. */
private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
    JobId id = new JobId(instanceId, testJob);
    StepStatus step = jobSteps.get(id);
    if (step == null || ! step.isDeclared())
        return Optional.empty();
    return Optional.of(id);
}
/** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) {
    if (DeploymentSpec.empty.equals(spec))
        return Map.of();
    Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
    List<StepStatus> previous = List.of();
    // Top-level steps are sequential: each depends on the steps produced by the previous one.
    for (DeploymentSpec.Step step : spec.steps())
        previous = fillStep(dependencies, allSteps, step, previous, null, jobs,
                            instanceWithImplicitTest(test, spec),
                            instanceWithImplicitTest(staging, spec));
    return Collections.unmodifiableMap(dependencies);
}
/**
 * The first instance of the spec, which gets an implicit test of the given environment —
 * or null when some instance declares that environment itself.
 */
private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) {
    InstanceName first = null;
    for (DeploymentInstanceSpec instance : spec.instances()) {
        if (instance.concerns(environment))
            return null;
        if (first == null)
            first = instance.name();
    }
    return first;
}
/**
 * Adds the primitive steps contained in the given step, which depend on the given previous primitives,
 * to the dependency graph, and returns the primitives which later steps should depend on.
 *
 * @param instance the instance currently being filled, or null outside any instance
 * @param implicitSystemTest / implicitStagingTest the instance which carries the implicit test of that
 *        environment, or null when a test is declared somewhere
 */
private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step,
                                  List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs,
                                  InstanceName implicitSystemTest, InstanceName implicitStagingTest) {
    // Leaf steps (no children, not an instance) become a single status node each.
    if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) {
        if (instance == null)
            return previous; // not a real leaf step outside an instance — ignore
        if ( ! step.delay().isZero()) {
            StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
            allSteps.add(stepStatus);
            return List.of(stepStatus);
        }
        JobType jobType;
        JobId jobId;
        StepStatus stepStatus;
        if (step.concerns(test) || step.concerns(staging)) {
            jobType = step.concerns(test) ? systemTest(null) : stagingTest(null);
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true);
            // Declared tests don't gate later steps, so they are appended rather than replacing previous.
            previous = new ArrayList<>(previous);
            previous.add(stepStatus);
        }
        else if (step.isTest()) {
            jobType = JobType.test(((DeclaredTest) step).region());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else if (step.concerns(prod)) {
            jobType = JobType.prod(((DeclaredZone) step).region().get());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else return previous; // dev / perf zones etc. have no step status
        allSteps.add(stepStatus);
        dependencies.put(jobId, stepStatus);
        return previous;
    }
    // An instance node: all its children depend on the instance status; implicit tests are attached here.
    if (step instanceof DeploymentInstanceSpec) {
        DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
        StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
        instance = spec.name();
        allSteps.add(instanceStatus);
        previous = List.of(instanceStatus);
        if (instance.equals(implicitSystemTest)) {
            JobId job = new JobId(application.id().instance(instance), systemTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(),
                                                                      this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
        if (instance.equals(implicitStagingTest)) {
            JobId job = new JobId(application.id().instance(instance), stagingTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(),
                                                                      this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
    }
    // Ordered children chain on each other; parallel children all depend on the same previous steps.
    if (step.isOrdered()) {
        for (DeploymentSpec.Step nested : step.steps())
            previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest);
        return previous;
    }
    List<StepStatus> parallel = new ArrayList<>();
    for (DeploymentSpec.Step nested : step.steps())
        parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest));
    return List.copyOf(parallel);
}
/** The kind of primitive step a {@link StepStatus} describes. */
public enum StepType {
    /** An instance — completion marks a change as ready for the jobs contained in it. */
    instance,
    /** A timed delay. */
    delay,
    /** A system, staging or production test. */
    test,
    /** A production deployment. */
    deployment,
}
/**
 * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
 *
 * Each node contains a step describing the node,
 * a list of steps which need to be complete before the step may start,
 * a list of jobs from which completion of the step is computed, and
 * optionally, an instance name used to identify a job type for the step,
 *
 * The completion criterion for each type of step is implemented in subclasses of this.
 */
public static abstract class StepStatus {

    private final StepType type;
    private final DeploymentSpec.Step step;
    private final List<StepStatus> dependencies;
    private final InstanceName instance;

    private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
        this.type = requireNonNull(type);
        this.step = requireNonNull(step);
        this.dependencies = List.copyOf(dependencies);
        this.instance = instance;
    }

    /** The type of step this is. */
    public final StepType type() { return type; }

    /** The step defining this. */
    public final DeploymentSpec.Step step() { return step; }

    /** The list of steps that need to be complete before this may start. */
    public final List<StepStatus> dependencies() { return dependencies; }

    /** The instance of this. */
    public final InstanceName instance() { return instance; }

    /** The id of the job this corresponds to, if any. */
    public Optional<JobId> job() { return Optional.empty(); }

    /** The time at which this is, or was, complete on the given change and / or versions. */
    public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

    /** The time at which this is, or was, complete on the given change and / or versions, as seen from the given dependent job. */
    abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

    /** The time at which this step is ready to run the specified change and / or versions. */
    public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

    /** The time at which this step is ready to run: dependencies complete, pushed past any block, pause or cooldown. */
    Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
        return dependenciesCompletedAt(change, dependent)
                .map(ready -> Stream.of(blockedUntil(change),
                                        pausedUntil(),
                                        coolingDownUntil(change))
                                    .flatMap(Optional::stream)
                                    .reduce(ready, maxBy(naturalOrder())));
    }

    /** The time at which all dependencies completed on the given change and / or versions; empty if any is incomplete. */
    Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
        Instant latest = Instant.EPOCH; // EPOCH when there are no dependencies at all
        for (StepStatus step : dependencies) {
            Optional<Instant> completedAt = step.completedAt(change, dependent);
            if (completedAt.isEmpty()) return Optional.empty();
            latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest;
        }
        return Optional.of(latest);
    }

    /** The time until which this step is blocked by a change blocker. */
    public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

    /** The time until which this step is paused by user intervention. */
    public Optional<Instant> pausedUntil() { return Optional.empty(); }

    /** The time until which this step is cooling down, due to consecutive failures. */
    public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

    /** Whether this step is declared in the deployment spec, or is an implicit step. */
    public boolean isDeclared() { return true; }

}
/** A timed delay step: complete once its configured delay has passed after it became ready. */
private static class DelayStatus extends StepStatus {

    private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
        super(StepType.delay, step, dependencies, instance);
    }

    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        // Complete exactly delay() after becoming ready.
        return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
    }

}
/** Status of an instance step: gates the jobs it contains on the instance's change and change blockers. */
private static class InstanceStatus extends StepStatus {

    private final DeploymentInstanceSpec spec;
    private final Instant now;
    private final Instance instance;
    private final DeploymentStatus status;

    private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                           Instance instance, DeploymentStatus status) {
        super(StepType.instance, spec, dependencies, spec.name());
        this.spec = spec;
        this.now = now;
        this.instance = instance;
        this.status = status;
    }

    /**
     * The earliest time at which any production job of this instance is ready to run the given change —
     * i.e., the instance is ready as soon as any of its jobs is.
     */
    @Override
    public Optional<Instant> readyAt(Change change) {
        return status.jobSteps.keySet().stream()
                              .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name()))
                              .map(job -> super.readyAt(change, Optional.of(job)))
                              .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().isBefore(o.get()) ? n : o)
                              .orElseGet(() -> super.readyAt(change, Optional.empty()));
    }

    /**
     * Time of completion of its dependencies, if all parts of the given change are contained in the change
     * for this instance, or if no more jobs should run for this instance for the given change.
     */
    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                 && (change.revision().isEmpty() || change.revision().equals(instance.change().revision()))
                 || step().steps().stream().noneMatch(step -> step.concerns(prod)))
               ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets()))
               : Optional.empty();
    }

    /**
     * Scans hour by hour, up to a week ahead, for the first instant not covered by any relevant
     * change blocker; blocked "forever" (~34 years) if the whole week is blocked.
     */
    @Override
    public Optional<Instant> blockedUntil(Change change) {
        for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
            boolean blocked = false;
            for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                while ( blocker.window().includes(current)
                        && now.plus(Duration.ofDays(7)).isAfter(current)
                        && ( change.platform().isPresent() && blocker.blocksVersions()
                             || change.revision().isPresent() && blocker.blocksRevisions())) {
                    blocked = true;
                    // Advance to the start of the next hour.
                    current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                }
            }
            if ( ! blocked)
                // NOTE(review): reference comparison is intentional here — current is only reassigned when blocked,
                // so == now means "never blocked"; confirm before changing to equals().
                return current == now ? Optional.empty() : Optional.of(current);
        }
        return Optional.of(now.plusSeconds(1 << 30));
    }

}
private static abstract class JobStepStatus extends StepStatus {
// The status of the job this step corresponds to, and the owning deployment status (for application / time lookups).
private final JobStatus job;
private final DeploymentStatus status;

private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                      DeploymentStatus status) {
    super(type, step, dependencies, job.id().application().instance());
    this.job = requireNonNull(job);
    this.status = requireNonNull(status);
}
/** The id of the job this step corresponds to; always present for job steps. */
@Override
public Optional<JobId> job() { return Optional.of(job.id()); }
/** The time until which this job is paused by user intervention, from the instance's job pause registry. */
@Override
public Optional<Instant> pausedUntil() {
    return status.application().require(job.id().application().instance()).jobPause(job.id().type());
}
/**
 * The time until which this job is cooling down after failures on the given change:
 * 10 minutes plus half the time it has been failing, measured from the last completed run —
 * i.e., an increasing back-off. Empty when not failing on this change, or the back-off has passed.
 */
@Override
public Optional<Instant> coolingDownUntil(Change change) {
    if (job.lastTriggered().isEmpty()) return Optional.empty();
    if (job.lastCompleted().isEmpty()) return Optional.empty();
    if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty();
    // Only cool down when the failures are on the very versions this change targets.
    Versions lastVersions = job.lastCompleted().get().versions();
    if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
    if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty();
    // Node allocation failures in test environments may resolve by themselves — retry immediately.
    if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty();
    Instant firstFailing = job.firstFailing().get().end().get();
    Instant lastCompleted = job.lastCompleted().get().end().get();
    return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted) // first failure: no extra back-off
                                              : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                         .plus(Duration.between(firstFailing, lastCompleted)
                                                                                       .dividedBy(2)))
                                                        .filter(status.now::isBefore);
}
/** Step status for a production deployment job: ready when tested, complete when the target versions are deployed. */
private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                    DeploymentStatus status, JobStatus job) {
    ZoneId zone = ZoneId.from(step.environment(), step.region().get());
    Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance())
                                                                        .deployments().get(zone));
    return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
        /** Ready when both dependencies are complete and the versions are verified by tests — whichever is later. */
        @Override
        public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
            Optional<Instant> readyAt = super.readyAt(change, dependent);
            Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id())));
            if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty();
            return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt;
        }

        /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
        @Override
        Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            if (    change.isPinned()
                 && change.platform().isPresent()
                 && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                return Optional.empty();

            if (    change.revision().isPresent()
                 && ! existingDeployment.map(Deployment::revision).equals(change.revision())
                 && dependent.equals(job())) // the dependent asks about itself: the revision must actually be deployed
                return Optional.empty();

            Change fullChange = status.application().require(job.id().application().instance()).change();
            // A deployment which would only be downgraded by the full change counts as complete as of its last run.
            if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.revision()))
                                                     &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision())))
                                  .orElse(false))
                return job.lastCompleted().flatMap(Run::end);

            // Otherwise, find the latest successful run matching the change; stop at the first
            // non-matching run when the dependent is this job itself.
            Optional<Instant> end = Optional.empty();
            for (Run run : job.runs().descendingMap().values()) {
                if (run.versions().targetsMatch(change)) {
                    if (run.hasSucceeded()) end = run.end();
                }
                else if (dependent.equals(job()))
                    break;
            }
            return end;
        }
    };
}
/**
 * Creates the step status for a declared test of a production deployment.
 *
 * The test is tied to the production deployment job in the same zone: it only becomes
 * ready after that deployment completes, and only runs started at or after that
 * completion count towards this step's own completion (when this job is the dependent).
 */
private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
DeploymentStatus status, JobStatus job) {
// The production deployment job in the same zone which this test verifies.
JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone()));
return new JobStepStatus(StepType.test, step, dependencies, job, status) {
@Override
Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
// Ready only when dependencies are ready AND the corresponding production deployment
// is complete; the later instant wins.
Optional<Instant> readyAt = super.readyAt(change, dependent);
Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty();
return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt;
}
@Override
Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
// When this job is the dependent, only the last triggered run counts, and it must have
// started no earlier than the production deployment completed; otherwise any run counts.
// The first successful run targeting the change yields the completion instant.
return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! run.start().isBefore(at)).orElse(false)).stream()
: job.runs().values().stream())
.filter(Run::hasSucceeded)
.filter(run -> run.versions().targetsMatch(change))
.flatMap(run -> run.end().stream()).findFirst();
}
};
}
/**
 * Creates the step status for a test-environment deployment (e.g. system or staging test).
 *
 * The step is complete at the end of the latest successful run which verifies the given
 * change — matched against the dependent's deployment versions when a dependent is given,
 * and against the change's own platform/revision targets otherwise.
 */
private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
DeploymentStatus status, JobStatus job, boolean declared) {
return new JobStepStatus(StepType.test, step, dependencies, job, status) {
@Override
Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
// With a dependent which has a deployment: the run's targets must match the versions
// computed from that deployment. Without one: it suffices that the run targets the
// change's platform and revision (each only checked when present in the change).
// Runs must also have succeeded, and — when a dependent is given — have run in the
// same cloud as the dependent.
return RunList.from(job)
.matching(run -> dependent.flatMap(status::deploymentFor)
.map(deployment -> run.versions().targetsMatch(Versions.from(change,
status.application,
Optional.of(deployment),
status.fallbackPlatform(change, dependent.get()))))
.orElseGet(() -> (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform()))
&& (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision()))))
.matching(Run::hasSucceeded)
.matching(run -> dependent.isEmpty() || status.findCloud(dependent.get().type()).equals(status.findCloud(run.id().type())))
.asList().stream()
// NOTE(review): run.end().get() assumes every succeeded run has an end — confirm.
.map(run -> run.end().get())
.max(naturalOrder());
}
// Whether this step was explicitly declared; decided by the caller.
@Override
public boolean isDeclared() { return declared; }
};
}
}
/**
 * An immutable description of a job run: the job type, the versions it targets,
 * when it became ready, and the change which produced it.
 *
 * For system test jobs, sources are stripped from the given versions on construction.
 */
public static class Job {

    private final JobType type;
    private final Versions versions;
    private final Optional<Instant> readyAt;
    private final Change change;

    public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) {
        this.type = type;
        // System tests carry no version sources; strip them up front.
        this.versions = type.isSystemTest() ? versions.withoutSources() : versions;
        this.readyAt = readyAt;
        this.change = change;
    }

    public JobType type() { return type; }

    public Versions versions() { return versions; }

    public Optional<Instant> readyAt() { return readyAt; }

    /** Equality is based on the type's zone (not the full type), versions, readiness and change. */
    @Override
    public boolean equals(Object o) {
        if (o == this)
            return true;
        if (o == null || o.getClass() != getClass())
            return false;
        Job other = (Job) o;
        return type.zone().equals(other.type.zone())
               && versions.equals(other.versions)
               && readyAt.equals(other.readyAt)
               && change.equals(other.change);
    }

    @Override
    public int hashCode() {
        // Keep in sync with equals: hash the zone, not the full job type.
        return Objects.hash(type.zone(), versions, readyAt, change);
    }

    @Override
    public String toString() {
        return change + " with versions " + versions + ", ready at " + readyAt;
    }

}
} |
```suggestion ImmutableSDField source = schema.getField(sourceFieldName); ``` | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | ImmutableSDField source = schema.getField(source_field_name); | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} |
```suggestion String sourceFieldName = summaryField.getSingleSource(); ``` | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | String source_field_name = summaryField.getSingleSource(); | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} |
Done. | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | ImmutableSDField source = schema.getField(source_field_name); | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
/** Throws IllegalArgumentException unless the two fields declare the same summary transform. */
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} |
Done. | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | String source_field_name = summaryField.getSingleSource(); | private void makeCopyTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && source.usesStructOrMap() && summaryField.hasExplicitSingleSource()) {
summaryField.setTransform(SummaryTransform.COPY);
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String source_field_name = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(source_field_name);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} | class SummaryConsistency extends Processor {
public SummaryConsistency(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
for (DocumentSummary summary : schema.getSummaries().values()) {
if (summary.getName().equals("default")) continue;
for (SummaryField summaryField : summary.getSummaryFields().values()) {
assertConsistency(summaryField, schema, validate);
makeAttributeTransformIfAppropriate(summaryField, schema);
makeAttributeCombinerTransformIfAppropriate(summaryField, schema);
makeCopyTransformIfAppropriate(summaryField, schema);
}
}
}
private void assertConsistency(SummaryField summaryField, Schema schema, boolean validate) {
SummaryField existingDefault = schema.getSummariesInThis().get("default").getSummaryField(summaryField.getName());
if (existingDefault != null) {
if (validate)
assertConsistentTypes(existingDefault, summaryField);
makeConsistentWithDefaultOrThrow(existingDefault, summaryField);
}
else {
SummaryField existing = schema.getExplicitSummaryField(summaryField.getName());
if (existing == null) return;
if (validate)
assertConsistentTypes(existing, summaryField);
makeConsistentOrThrow(existing, summaryField, schema);
}
}
/** If the source is an attribute, make this use the attribute transform */
private void makeAttributeTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() != SummaryTransform.NONE) return;
Attribute attribute = schema.getAttribute(summaryField.getSingleSource());
if (attribute == null) return;
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
/** If the source is a complex field with only struct field attributes then make this use the attribute combiner transform */
private void makeAttributeCombinerTransformIfAppropriate(SummaryField summaryField, Schema schema) {
if (summaryField.getTransform() == SummaryTransform.NONE) {
String sourceFieldName = summaryField.getSingleSource();
ImmutableSDField source = schema.getField(sourceFieldName);
if (source != null && isComplexFieldWithOnlyStructFieldAttributes(source)) {
summaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
}
}
/*
* This function must be called after makeAttributeCombinerTransformIfAppropriate().
*/
private void assertConsistentTypes(SummaryField existing, SummaryField seen) {
if (existing.getDataType() instanceof WeightedSetDataType && seen.getDataType() instanceof WeightedSetDataType &&
((WeightedSetDataType)existing.getDataType()).getNestedType().equals(((WeightedSetDataType)seen.getDataType()).getNestedType()))
return;
if ( ! compatibleTypes(seen.getDataType(), existing.getDataType()))
throw new IllegalArgumentException(existing.toLocateString() + " is inconsistent with " +
seen.toLocateString() + ": All declarations of the same summary field must have the same type");
}
private boolean compatibleTypes(DataType summaryType, DataType existingType) {
if (summaryType instanceof TensorDataType && existingType instanceof TensorDataType) {
return summaryType.isAssignableFrom(existingType);
}
return summaryType.equals(existingType);
}
private void makeConsistentOrThrow(SummaryField field1, SummaryField field2, Schema schema) {
if (field2.getTransform() == SummaryTransform.ATTRIBUTE && field1.getTransform() == SummaryTransform.NONE) {
Attribute attribute = schema.getAttribute(field1.getName());
if (attribute != null) {
field1.setTransform(SummaryTransform.ATTRIBUTE);
}
}
if (field2.getTransform().equals(SummaryTransform.NONE)) {
field2.setTransform(field1.getTransform());
}
else {
assertEqualTransform(field1,field2);
}
}
private void makeConsistentWithDefaultOrThrow(SummaryField defaultField, SummaryField newField) {
if (newField.getTransform().equals(SummaryTransform.NONE)) {
newField.setTransform(defaultField.getTransform());
}
else {
assertEqualTransform(defaultField,newField);
}
}
private void assertEqualTransform(SummaryField field1, SummaryField field2) {
if ( ! field2.getTransform().equals(field1.getTransform())) {
throw new IllegalArgumentException("Conflicting summary transforms. " + field2 + " is already defined as " +
field1 + ". A field with the same name " +
"can not have different transforms in different summary classes");
}
}
} |
I assume you want this displayed in console? In that case, this must be logged with `log.logApplicationPackage()` | private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of")) {
List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
"replacement, and the feature will be removed in Vespa 9. Use separate container and " +
"content clusters instead");
return containers;
} else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
} | log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " + | private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of")) {
List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
"replacement, and the feature will be removed in Vespa 9. Use separate container and " +
"content clusters instead");
return containers;
} else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container (disables the rpc server)
 * @param networking        whether the http server should be enabled
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids (the 'container' tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
/** Builds the container cluster from the given services.xml element and stores it on the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
// Message bus and the rpc server are only enabled outside standalone mode; http follows the networking setting.
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
/** Creates the (empty) cluster config producer for this container element. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the container element: components, secret stores,
 * model evaluation, search/docproc/document-api chains, handlers, http, access
 * logs, nodes, and ZooKeeper.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addEmbedderComponents(deployState, cluster, spec);
addModelEvaluation(spec, cluster, context);
addModelEvaluationBundles(cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addNodes(cluster, spec, context);
addServerProviders(deployState, spec, cluster);
addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
// Must happen after addNodes: validates the number of non-retired containers.
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
/**
 * On hosted systems, adds the jdisc-cloud-aws bundle; on public systems, also
 * binds the AWS parameter store validation handler at /validate-secret-store.
 */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
if(deployState.isHosted()) {
cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
}
if (deployState.zone().system().isPublic()) {
BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store");
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
handler.addServerBindings(bindingPattern);
cluster.addComponent(handler);
}
}
/**
 * Sets up a reconfigurable ZooKeeper ensemble on the cluster's containers if the
 * spec asks for one. Rejects combined clusters and even or out-of-range node counts.
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
if ( ! hasZooKeeper(spec)) return;
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
// Only non-retired nodes count towards the ensemble; an odd size is required.
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the ZooKeeper server, reconfigurer and admin components to the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container));
}
/** Creates a component from the zookeeper-server bundle, configured with the given container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/**
 * Configures the cluster's secret store from an optional secret-store element:
 * type="cloud" sets up a cloud secret store, anything else a plain grouped one.
 */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
String type = secretStoreElement.getAttribute("type");
if ("cloud".equals(type)) {
addCloudSecretStore(cluster, secretStoreElement, deployState);
} else {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
}
/**
 * Adds a CloudSecretStore component configured from the aws-parameter-store
 * children of the store element. Each referenced account must match a tenant
 * secret store declared in the deploy properties, with an external ID set.
 * Only applies to hosted, public systems.
 */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
            .stream()
            .collect(Collectors.toMap(
                    TenantSecretStore::getName,
                    store -> store
            ));
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        String account = group.getAttribute("account");
        String region = group.getAttribute("aws-region");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        // Include the store name so the deployer can tell which declaration is incomplete.
        if (secretStore.getExternalId().isEmpty())
            throw new IllegalArgumentException("No external ID has been set for secret store " + account);
        cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
    }
    cluster.addComponent(cloudSecretStore);
}
/**
 * Applies deployment.xml-derived configuration on hosted systems: surfaces
 * deprecation warnings to the deployer, and adds the identity provider and
 * rotation properties.
 */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
if ( ! context.getDeployState().isHosted()) return;
Optional<DeploymentSpec> deploymentSpec = app.getDeployment().map(DeploymentSpec::fromXml);
if (deploymentSpec.isEmpty()) return;
// logApplicationPackage makes these warnings visible in the deployer's console.
for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) {
deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec.get());
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec.get());
}
/** Sets the "rotations" and "activeRotation" properties on each container of the cluster. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
/** Returns whether this instance's deployment spec declares the given zone as active. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
if (instance.isEmpty()) return false;
return instance.get().zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
/**
 * Sets the "rotations" property on the container to the comma-separated global
 * endpoint names assigned to the named cluster.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    var rotationsProperty = endpoints.stream()
            .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
            .filter(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.global)
            .flatMap(endpoint -> endpoint.names().stream())
            // LinkedHashSet: deduplicate while preserving the declared order.
            .collect(Collectors.toCollection(LinkedHashSet::new));
    container.setProp("rotations", String.join(",", rotationsProperty));
}
/** Translates each embedder element into a component and adds it to the cluster. */
private static void addEmbedderComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element node : XML.getChildren(spec, "embedder")) {
Element transformed = EmbedderConfig.transform(deployState, node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, transformed));
}
}
/** Adds component children declared directly under the container element and under any components elements. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds the status handler: on hosted Vespa a file-backed /status.html handler
 * (file location overridable through the VESPA_LB_STATUS_FILE environment
 * variable), otherwise the VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
/** Adds components declared with the server tag. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging. User-supplied accesslog elements are ignored (with a
 * warning) on hosted Vespa; otherwise they are honored, with a default search
 * access log added when none is configured and the deploy state enables it.
 * A connection log component is added whenever any access log component exists.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
if (cluster.isHostedVespa() && !accessLogElements.isEmpty()) {
accessLogElements.clear();
log.logApplicationPackage(
Level.WARNING, "Applications are not allowed to override the 'accesslog' element");
} else {
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
// The connection log cluster name differs between hosted/Vespa 8 ("access") and older self-hosted ("qrs").
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
if (cluster.isHostedVespa() || deployState.getVespaVersion().getMajor() == 8) {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
} else {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
}
}
}
/** Returns all accesslog children of the container element. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the http configuration from the optional http element, and for hosted
 * tenant applications adds the implicit server, access control and the extra
 * hosted TLS connector.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addAdditionalHostedConnector(deployState, cluster, context);
}
}
/** Applies the configured access control to the default hosted connector, if access control is present. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    // The stray empty statement (double semicolon) has been removed.
    cluster.getHttp().getAccessControl()
           .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the hosted TLS connector to the existing http server, choosing the
 * certificate/truststore variant based on whether endpoint certificates are
 * provided and whether the system is public (which requires client authorization).
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
HostedSslConnectorFactory connectorFactory;
Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
boolean proxyProtocolMixedMode = deployState.getProperties().featureFlags().enableProxyProtocolMixedMode();
if (deployState.endpointCertificateSecrets().isPresent()) {
// Public systems require clients.pem so client certificates can be authorized.
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
// NOTE(review): the documentation URL in this message appears truncated in this copy — restore the full link.
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, proxyProtocolMixedMode)
: HostedSslConnectorFactory.withProvidedCertificate(
serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, proxyProtocolMixedMode);
} else {
connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, proxyProtocolMixedMode);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/**
 * Returns trusted certificates as a PEM encoded string containing the
 * concatenation of trusted certs from the application package and all operator
 * certificates.
 */
String getTlsClientAuthorities(DeployState deployState) {
List<X509Certificate> trustedCertificates = deployState.tlsClientAuthority()
.map(X509CertificateUtils::certificateListFromPem)
.orElse(Collections.emptyList());
ArrayList<X509Certificate> x509Certificates = new ArrayList<>(trustedCertificates);
x509Certificates.addAll(deployState.getProperties().operatorCertificates());
return X509CertificateUtils.toPem(x509Certificates);
}
/** Returns whether this is a hosted, default-type, non-tester application deployment. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
var deployState = context.getDeployState();
boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication;
}
/**
 * Ensures the cluster has an http server with a connector on the default Vespa
 * web service port, creating whichever parts are missing.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
/**
 * Adds default access control for the tenant's Athenz domain when none is
 * configured. No-op if access control already exists or no domain is set.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
Http http = cluster.getHttp();
if (http.getAccessControl().isPresent()) return;
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
if (tenantDomain == null) return;
new AccessControl.Builder(tenantDomain.value())
.setHandlers(cluster)
.clientAuthentication(AccessControl.ClientAuthentication.need)
.build()
.configureHttpFilterChains(http);
}
/** Builds the http config from the http element; servers are stripped when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Builds and attaches the document API if the spec declares it. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
/** Builds and attaches document processing if declared, propagating its options as message bus parameters. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Builds search chains, the search handler, the GUI handler and any renderers from the search element. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Enables stateless model evaluation when the model-evaluation element is
 * present, applying per-model ONNX execution overrides (execution mode,
 * inter/intra-op threads) to the rank profiles' ONNX models.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name"));
// Models not found among the rank profiles' ONNX models are silently skipped.
if (onnxModel == null)
continue;
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
private String getStringValue(Element element, String name, String defaultValue) {
Element child = XML.getChild(element, name);
return (child != null) ? child.getTextContent() : defaultValue;
}
private int getIntValue(Element element, String name, int defaultValue) {
Element child = XML.getChild(element, name);
return (child != null) ? Integer.parseInt(child.getTextContent()) : defaultValue;
}
    /** Installs the model-evaluation platform bundles on the cluster (unconditionally — see comment below). */
    protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
        /* These bundles are added to all application container clusters, even if they haven't
         * declared 'model-evaluation' in services.xml, because there are many public API packages
         * in the model-evaluation bundle that could be used by customer code. */
        cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
        cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
    }
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder(cluster).build(deployState, cluster, component));
}
}
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if ( ! Version.fromString(version).equals(new Version(1)))
throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder)
addStandaloneNode(cluster, context.getDeployState());
else
addNodesFromXml(cluster, spec, context);
}
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
private static String getJvmOptions(ApplicationContainerCluster cluster,
Element nodesElement,
DeployState deployState,
boolean legacyOptions) {
return new JvmOptions(cluster, nodesElement, deployState, legacyOptions).build();
}
private static String extractAttribute(Element element, String attrName) {
return element.hasAttribute(attrName) ? element.getAttribute(attrName) : null;
}
private void extractJvmOptions(List<ApplicationContainer> nodes,
ApplicationContainerCluster cluster,
Element nodesElement,
ConfigModelContext context) {
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
}
}
    /** Applies JVM settings from the deprecated attributes directly on the &lt;nodes&gt; element. */
    private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                              Element nodesElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), true));
        // Only fill in GC options if the cluster does not already have them
        if (cluster.getJvmGCOptions().isEmpty()) {
            String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
            cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
        }
        applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    }
    /** Applies JVM settings from a &lt;jvm&gt; element (the non-legacy configuration style). */
    private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                               Element nodesElement, Element jvmElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), false));
        applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
        String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
        cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
    }
    /**
     * Add nodes to cluster according to the given containerElement.
     *
     * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
     * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
     * simultaneously for all active config models.
     */
    private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
        Element nodesElement = XML.getChild(containerElement, "nodes");
        if (nodesElement == null) {
            // No <nodes> element: allocate implicitly (hosted) or use the single default host
            cluster.addContainers(allocateWithoutNodesTag(cluster, context));
        } else {
            List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
            extractJvmOptions(nodes, cluster, nodesElement, context);
            applyDefaultPreload(nodes, nodesElement);
            String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
            if (!environmentVars.isEmpty()) {
                cluster.setEnvironmentVars(environmentVars);
            }
            if (useCpuSocketAffinity(nodesElement))
                AbstractService.distributeCpuSocketAffinity(nodes);
            cluster.addContainers(nodes);
        }
    }
private static String getEnvironmentVariables(Element environmentVariables) {
StringBuilder sb = new StringBuilder();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
}
}
return sb.toString();
}
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
try {
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
}
}
    /** Allocate a container cluster without a nodes tag */
    private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        HostSystem hostSystem = cluster.hostSystem();
        if (deployState.isHosted()) {
            // Hosted: request nodes from the provisioner; 2 in production (redundancy), 1 elsewhere
            ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                          ClusterSpec.Id.from(cluster.getName()))
                    .vespaVersion(deployState.getWantedNodeVespaVersion())
                    .dockerImageRepository(deployState.getWantedDockerImageRepo())
                    .build();
            int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
            deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount +
                                                                            " nodes in " + cluster);
            // 'required' is false; 'canFail' is false only during bootstrap
            Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
                                              false,
                                              !deployState.getProperties().isBootstrap());
            var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
            return createNodesFromHosts(hosts, cluster, context.getDeployState());
        }
        else {
            // Self-hosted without <nodes>: everything runs on the single default host
            return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
        }
    }
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
node.setHostResource(host);
node.initService(context.getDeployState());
return List.of(node);
}
    /** Creates container nodes from a counted &lt;nodes&gt; specification by provisioning from the host system. */
    private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                  ClusterSpec.Type.container,
                                                                                  ClusterSpec.Id.from(cluster.getName()),
                                                                                  log,
                                                                                  hasZooKeeper(containerElement));
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    /** Creates container nodes from a &lt;nodes type="..."&gt; specification, claiming all hosts of that node type. */
    private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
        ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
                .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
                .build();
        Map<HostResource, ClusterMembership> hosts =
                cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
                                                             Capacity.fromRequiredNodeType(type), log);
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    /** Creates container nodes co-located with the content cluster referenced by &lt;nodes of="..."&gt; (combined cluster). */
    private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodeSpecification;
        try {
            nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        } catch (IllegalArgumentException e) {
            // Re-wrap to identify which cluster holds the bad reference
            throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
        }
        String referenceId = nodesElement.getAttribute("of");
        cluster.setHostClusterId(referenceId);
        Map<HostResource, ClusterMembership> hosts =
                StorageGroup.provisionHosts(nodeSpecification,
                                            referenceId,
                                            cluster.getRoot().hostSystem(),
                                            context.getDeployLogger());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
ApplicationContainerCluster cluster,
DeployState deployState) {
List<ApplicationContainer> nodes = new ArrayList<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
String id = "container." + entry.getValue().index();
ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), deployState);
container.setHostResource(entry.getKey());
container.initService(deployState);
nodes.add(container);
}
return nodes;
}
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
List<ApplicationContainer> nodes = new ArrayList<>();
int nodeIndex = 0;
for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
nodeIndex++;
}
return nodes;
}
private static boolean useCpuSocketAffinity(Element nodesElement) {
if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
else
return false;
}
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
for (Container container: containers) {
if (container.getAssignedJvmOptions().isEmpty())
container.prependJvmOptions(jvmArgs);
}
}
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
for (Container container: containers)
container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
cluster.addComponent(
new SearchHandler(
cluster,
serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
}
private void addGUIHandler(ApplicationContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
cluster.addComponent(guiHandler);
}
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
List<Element> bindings = XML.getChildren(searchElement, "binding");
if (bindings.isEmpty())
return List.of(defaultBindings);
return toBindingList(bindings);
}
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
List<BindingPattern> result = new ArrayList<>();
for (Element element: bindingElements) {
String text = element.getTextContent().trim();
if (!text.isEmpty())
result.add(UserBindingPattern.fromPattern(text));
}
return result;
}
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
private void addIncludes(Element parentElement) {
List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
if (includes.isEmpty()) {
return;
}
if (app == null) {
throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
}
for (Element include : includes) {
addInclude(parentElement, include);
}
}
    /** Imports all top-level elements from every XML file in the referenced include directory into parentElement. */
    private void addInclude(Element parentElement, Element include) {
        String dirName = include.getAttribute(IncludeDirs.DIR);
        app.validateIncludeDir(dirName);
        List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
        for (Element includedFile : includedFiles) {
            List<Element> includedSubElements = XML.getChildren(includedFile);
            for (Element includedSubElement : includedSubElements) {
                // importNode(deep=true) copies the node into parentElement's document before appending
                Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
                parentElement.appendChild(copiedNode);
            }
        }
    }
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
Element spec, String componentName) {
for (Element node : XML.getChildren(spec, componentName)) {
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
private static void validateAndAddConfiguredComponents(DeployState deployState,
ContainerCluster<? extends Container> cluster,
Element spec, String componentName,
Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
    /** Registers an Athenz identity provider on the cluster when the deployment spec declares an Athenz domain. */
    private void addIdentityProvider(ApplicationContainerCluster cluster,
                                     List<ConfigServerSpec> configServerSpecs,
                                     HostName loadBalancerName,
                                     URI ztsUrl,
                                     String athenzDnsSuffix,
                                     Zone zone,
                                     DeploymentSpec spec) {
        spec.athenzDomain()
            .ifPresent(domain -> {
                // Lookup order: per instance+zone, then spec-wide; a declared domain without a service is an error
                AthenzService service = spec.instance(app.getApplicationId().instance())
                        .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                        .or(() -> spec.athenzService())
                        .orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
                                                                        app.getApplicationId().instance() + "'"));
                String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
                IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                         service,
                                                                         getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                         ztsUrl,
                                                                         zoneDnsSuffix,
                                                                         zone);
                cluster.addComponent(identityProvider);
                // Also expose the identity on each container as service properties
                cluster.getContainers().forEach(container -> {
                    container.setProp("identity.domain", domain.value());
                    container.setProp("identity.service", service.value());
                });
            });
    }
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
return Optional.ofNullable(loadbalancerName)
.orElseGet(
() -> HostName.of(configServerSpecs.stream()
.findFirst()
.map(ConfigServerSpec::getHostName)
.orElse("unknown")
));
}
    /** Returns whether the spec declares a &lt;zookeeper&gt; element. */
    private static boolean hasZooKeeper(Element spec) {
        return XML.getChild(spec, "zookeeper") != null;
    }
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
String idAttr = element.getAttribute("id");
if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
}
    /** Returns whether the given element is a container tag. */
    public static boolean isContainerTag(Element element) {
        return CONTAINER_TAG.equals(element.getTagName());
    }
/**
* Validates JVM options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system.
*/
private static class JvmOptions {
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+-]+");
private static final Pattern invalidInHostedatttern = Pattern.compile("-Xrunjdwp:transport=.*");
private final ContainerCluster<?> cluster;
private final Element nodesElement;
private final DeployLogger logger;
private final boolean legacyOptions;
private final boolean isHosted;
public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.cluster = cluster;
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 8." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toList());
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedatttern.pattern(), option))
.sorted()
.collect(Collectors.toList()));
if (invalidOptions.isEmpty()) return;
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions;
private final DeployLogger logger;
private final boolean isHosted;
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.collect(Collectors.toList());
if (isHosted) {
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidCMSPattern.pattern(), option) ||
option.equals("-XX:+UseConcMarkSweepGC"))
.collect(Collectors.toList()));
}
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
Collections.sort(options);
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
}
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
    // File served by the status handler in hosted Vespa (overridable via the env variable below)
    static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
    // Env variable which, when set, overrides HOSTED_VESPA_STATUS_FILE
    private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
    private static final String CONTAINER_TAG = "container";
    private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
    // A ZooKeeper cluster needs an odd node count within these bounds (enforced in addZooKeeper)
    private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
    private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
    /** Whether the container should open network ports. */
    public enum Networking { disable, enable }
    private ApplicationPackage app;
    private final boolean standaloneBuilder;
    private final Networking networking;
    private final boolean rpcServerEnabled;
    private final boolean httpServerEnabled;
    protected DeployLogger log;
    public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG));
    // Renderer ids reserved for the built-in renderers (user renderers may not reuse them)
    private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
    private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
    /**
     * @param standaloneBuilder whether this is building a standalone container (outside the full config model)
     * @param networking whether the container should open network ports
     */
    public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
        super(ContainerModel.class);
        this.standaloneBuilder = standaloneBuilder;
        this.networking = networking;
        this.rpcServerEnabled = !standaloneBuilder; // no rpc server in standalone mode
        this.httpServerEnabled = networking == Networking.enable;
    }
    /** Returns the config model ids handled by this builder (the container tag). */
    @Override
    public List<ConfigModelId> handlesElements() {
        return configModelIds;
    }
    /** Builds the container model: creates the cluster from the spec and flags mbus/rpc/http according to builder mode. */
    @Override
    public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
        log = modelContext.getDeployLogger();
        app = modelContext.getApplicationPackage();
        checkVersion(spec);
        ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
        addClusterContent(cluster, spec, modelContext);
        cluster.setMessageBusEnabled(rpcServerEnabled);
        cluster.setRpcServerEnabled(rpcServerEnabled);
        cluster.setHttpServerEnabled(httpServerEnabled);
        model.setCluster(cluster);
    }
    /** Creates the cluster object itself via the standard DOM config producer builder. */
    private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
        return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
            @Override
            protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
                // The producer id serves as both cluster name and config id
                return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
                                                       modelContext.getProducerId(), deployState);
            }
        }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
    }
    /**
     * Populates the cluster from the services.xml spec.
     * NOTE(review): several steps appear order-sensitive (e.g. search is set up before its handler, and
     * nodes are added before the ZooKeeper node-count validation) — keep the sequence when modifying.
     */
    private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
        addConfiguredComponents(deployState, cluster, spec);
        addSecretStore(cluster, spec, deployState);
        addEmbedderComponents(deployState, cluster, spec);
        addModelEvaluation(spec, cluster, context);
        addModelEvaluationBundles(cluster);
        addProcessing(deployState, spec, cluster);
        addSearch(deployState, spec, cluster);
        addDocproc(deployState, spec, cluster);
        addDocumentApi(spec, cluster);
        cluster.addDefaultHandlersExceptStatus();
        addStatusHandlers(cluster, context.getDeployState().isHosted());
        addUserHandlers(deployState, cluster, spec);
        addHttp(deployState, spec, cluster, context);
        addAccessLogs(deployState, cluster, spec);
        addNodes(cluster, spec, context);
        addServerProviders(deployState, spec, cluster);
        addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
        addZooKeeper(cluster, spec);
        addParameterStoreValidationHandler(cluster, deployState);
    }
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
if(deployState.isHosted()) {
cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
}
if (deployState.zone().system().isPublic()) {
BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store");
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
handler.addServerBindings(bindingPattern);
cluster.addComponent(handler);
}
}
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
if ( ! hasZooKeeper(spec)) return;
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
    /** Adds the components needed to run a reconfigurable ZooKeeper server on the given container. */
    public static void addReconfigurableZooKeeperServerComponents(Container container) {
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container));
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container));
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container));
    }
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
String configId = container.getConfigId();
return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", configId));
}
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
String type = secretStoreElement.getAttribute("type");
if ("cloud".equals(type)) {
addCloudSecretStore(cluster, secretStoreElement, deployState);
} else {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
}
    /** Configures the cloud secret store from declared AWS parameter stores; only supported in hosted public systems. */
    private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
        if ( ! deployState.isHosted()) return; // silently ignored outside hosted Vespa
        if ( ! cluster.getZone().system().isPublic())
            throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
        CloudSecretStore cloudSecretStore = new CloudSecretStore();
        // Index the tenant secret stores declared in deploy properties by name
        Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
                .stream()
                .collect(Collectors.toMap(
                        TenantSecretStore::getName,
                        store -> store
                ));
        Element store = XML.getChild(secretStoreElement, "store");
        for (Element group : XML.getChildren(store, "aws-parameter-store")) {
            String account = group.getAttribute("account");
            String region = group.getAttribute("aws-region");
            TenantSecretStore secretStore = secretStoresByName.get(account);
            if (secretStore == null)
                throw new IllegalArgumentException("No configured secret store named " + account);
            if (secretStore.getExternalId().isEmpty())
                throw new IllegalArgumentException("No external ID has been set");
            cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
        }

        cluster.addComponent(cloudSecretStore);
    }
    /** Applies deployment.xml-derived config (Athenz identity, rotations) in hosted systems; no-op otherwise. */
    private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
        if ( ! context.getDeployState().isHosted()) return;
        Optional<DeploymentSpec> deploymentSpec = app.getDeployment().map(DeploymentSpec::fromXml);
        if (deploymentSpec.isEmpty()) return;
        // Surface deprecated deployment.xml elements as application-package warnings
        for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) {
            deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
        }
        addIdentityProvider(cluster,
                            context.getDeployState().getProperties().configServerSpecs(),
                            context.getDeployState().getProperties().loadBalancerName(),
                            context.getDeployState().getProperties().ztsUrl(),
                            context.getDeployState().getProperties().athenzDnsSuffix(),
                            context.getDeployState().zone(),
                            deploymentSpec.get());
        addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec.get());
    }
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
/** Returns whether the deployment spec declares this zone as active for this instance's global rotation. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.instance(app.getApplicationId().instance())
               .map(instanceSpec -> instanceSpec.zones().stream()
                                                .anyMatch(declaredZone -> declaredZone.active() &&
                                                                          declaredZone.concerns(zone.environment(),
                                                                                                Optional.of(zone.region()))))
               .orElse(false);
}
/**
 * Sets the "rotations" property on the container to the comma-separated list of global
 * endpoint names declared for the given cluster, preserving declaration order and de-duplicating.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    var rotationsProperty = endpoints.stream()
            .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
            .filter(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.global)
            .flatMap(endpoint -> endpoint.names().stream())
            // LinkedHashSet keeps first-seen order while removing duplicates
            .collect(Collectors.toCollection(LinkedHashSet::new));
    container.setProp("rotations", String.join(",", rotationsProperty));
}
/** Registers each &lt;embedder&gt; element as a component after transforming it to canonical component form. */
private static void addEmbedderComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element embedderElement : XML.getChildren(spec, "embedder")) {
        Element componentElement = EmbedderConfig.transform(deployState, embedderElement);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Registers user-declared components, both inside &lt;components&gt; sections and directly under the cluster root. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    // <components> sections may pull in additional declarations via <include>
    for (Element componentsElement : XML.getChildren(spec, "components")) {
        addIncludes(componentsElement);
        addConfiguredComponents(deployState, cluster, componentsElement, "component");
    }
    // Top-level <component> elements
    addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds a status handler: hosted Vespa serves a status file from disk (path overridable via
 * environment), while self-hosted clusters get the standard VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
// Environment variable overrides the default status file location when set
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
/** Registers user-declared &lt;server&gt; components on the cluster. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging. Hosted applications may not override the 'accesslog' element;
 * their declared elements are discarded (with a warning), which makes the default access
 * log apply below instead.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
if (cluster.isHostedVespa() && !accessLogElements.isEmpty()) {
// Clearing the list makes the default-access-log branch below take effect
accessLogElements.clear();
log.logApplicationPackage(
Level.WARNING, "Applications are not allowed to override the 'accesslog' element");
} else {
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
// Any access log implies a connection log; the log name differs by hosting and major version
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
if (cluster.isHostedVespa() || deployState.getVespaVersion().getMajor() == 8) {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
} else {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
}
}
}
/** Returns all &lt;accesslog&gt; child elements of the given cluster spec element. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the http model from the &lt;http&gt; element (if any); for hosted tenant applications
 * also adds the implicit http server, access control and the extra TLS connector.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
// Order matters: the implicit server must exist before access control and connectors are configured
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addAdditionalHostedConnector(deployState, cluster, context);
}
}
/** Binds the configured access-control filter chain (if any) to the default hosted connector as well. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    // Removed a stray empty statement (';;') that followed the original call
    cluster.getHttp().getAccessControl()
           .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the hosted TLS connector to the existing Jetty server, choosing certificate/truststore
 * configuration based on whether endpoint certificates are provisioned and whether the system
 * is public (which requires client authorization via security/clients.pem).
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
HostedSslConnectorFactory connectorFactory;
Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
boolean proxyProtocolMixedMode = deployState.getProperties().featureFlags().enableProxyProtocolMixedMode();
if (deployState.endpointCertificateSecrets().isPresent()) {
// Public systems require mutual TLS with a tenant-provided client CA
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
// NOTE(review): the string literal below appears truncated in this copy of the file — verify the full URL in VCS
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, proxyProtocolMixedMode)
: HostedSslConnectorFactory.withProvidedCertificate(
serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, proxyProtocolMixedMode);
} else {
// No provisioned certificate: fall back to the default certificate and truststore
connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, proxyProtocolMixedMode);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/**
 * Returns trusted certificates as a PEM encoded string containing the concatenation of
 * trusted certs from the application package and all operator certificates.
 */
String getTlsClientAuthorities(DeployState deployState) {
    List<X509Certificate> certificates = new ArrayList<>(deployState.tlsClientAuthority()
                                                                    .map(X509CertificateUtils::certificateListFromPem)
                                                                    .orElse(Collections.emptyList()));
    certificates.addAll(deployState.getProperties().operatorCertificates());
    return X509CertificateUtils.toPem(certificates);
}
/** Returns whether this is a hosted, non-tester tenant application of the default application type. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    var deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return false;
    if (context.getApplicationType() != ApplicationType.DEFAULT) return false;
    return ! deployState.getProperties().applicationId().instance().isTester();
}
/** Ensures a hosted cluster has an http model, a Jetty server, and a connector on the default Vespa web service port. */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
// Only add the default connector if no configured connector already listens on that port
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
/** Adds Athenz access control (requiring client auth) unless the application configured access control itself. */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
Http http = cluster.getHttp();
if (http.getAccessControl().isPresent()) return; // access control already configured by the application
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
if (tenantDomain == null) return; // no tenant Athenz domain — nothing to protect with
new AccessControl.Builder(tenantDomain.value())
.setHandlers(cluster)
.clientAuthentication(AccessControl.ClientAuthentication.need)
.build()
.configureHttpFilterChains(http);
}
/** Builds the http model from the &lt;http&gt; element; strips all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Attaches the document-api model to the cluster when a &lt;document-api&gt; element is present. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null) {
        cluster.setDocumentApi(documentApi);
    }
}
/** Builds and attaches the document processing model, including the derived message bus tuning parameters. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return; // no <document-processing> element
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Builds the search model from the &lt;search&gt; element and registers its handlers and renderers. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
// Renderer ids "XmlRenderer"/"JsonRenderer" are reserved, hence the validation step
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Enables stateless model evaluation, applying per-model ONNX runtime overrides from &lt;model-evaluation&gt;. */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// NOTE(review): onnxElement may be null here; this assumes XML.getChild tolerates a null parent — verify
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null)
continue; // silently ignore overrides for models not present in the rank profile list
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Returns the text content of the named child element, or {@code defaultValue} when the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or {@code defaultValue} when absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/** Adds the model-evaluation platform bundles to the cluster. */
protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
 * declared 'model-evaluation' in services.xml, because there are many public API packages
 * in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
}
/** Builds the processing model (chains and server bindings) from the &lt;processing&gt; element, if present. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
// Renderer ids "XmlRenderer"/"JsonRenderer" are reserved, hence the validation step
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the container search model: search chains plus query profiles, semantic rules and page templates. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
/** Validates and applies page templates from the application package to the search model. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Registers each user-declared &lt;handler&gt; element as a request handler component. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
/** Verifies that the container element declares version 1.0; throws otherwise. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    boolean isVersionOne = Version.fromString(version).equals(new Version(1));
    if ( ! isVersionOne)
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds containers to the cluster: one implicit standalone node, or nodes declared in services.xml. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds the single implicit container used when building a standalone container model. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
/** Builds (and validates) the JVM GC options string for this deployment. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
/** Builds (and validates) the JVM options string from the &lt;nodes&gt; element (legacy attribute or &lt;jvm&gt; tag). */
private static String getJvmOptions(ApplicationContainerCluster cluster,
Element nodesElement,
DeployState deployState,
boolean legacyOptions) {
return new JvmOptions(cluster, nodesElement, deployState, legacyOptions).build();
}
/** Returns the value of the named attribute, or null when the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/** Extracts JVM settings from the &lt;jvm&gt; element, falling back to legacy attributes on &lt;nodes&gt; when absent. */
private void extractJvmOptions(List<ApplicationContainer> nodes,
ApplicationContainerCluster cluster,
Element nodesElement,
ConfigModelContext context) {
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
}
}
/** Applies JVM settings from the deprecated attributes on the &lt;nodes&gt; element itself. */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), true));
// Do not overwrite GC options that were already set on the cluster
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
/** Applies JVM settings (options, allocated memory percentage, GC options) from the &lt;jvm&gt; element. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), false));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No <nodes> element: implicit allocation (hosted) or a single-node cluster (self-hosted)
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
// Environment variables declared under <nodes> apply to every container in the cluster
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (!environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Serializes the children of the environment-variables element into a space-separated
 * "NAME=value " string; returns "" when the element is null.
 */
private static String getEnvironmentVariables(Element environmentVariables) {
    if (environmentVariables == null) return "";
    StringBuilder serialized = new StringBuilder();
    for (Element variable : XML.getChildren(environmentVariables)) {
        serialized.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
    }
    return serialized.toString();
}
/**
 * Parses a memory percentage string like "60%" and applies it to the cluster.
 * Throws IllegalArgumentException when the value is not an integer percentage ending with '%'.
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    memoryPercentage = memoryPercentage.trim();
    // Single definition of the error message (was duplicated in both failure branches)
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Preserve the parse failure as the cause (was previously dropped)
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepository(deployState.getWantedDockerImageRepo())
.build();
// Two nodes in production for redundancy, one elsewhere
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount +
" nodes in " + cluster);
Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
else {
// Self-hosted: a single container on the single-node host
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
}
/** Creates a one-container cluster placed on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
    container.setHostResource(host);
    container.initService(context.getDeployState());
    return List.of(container);
}
/** Provisions container nodes from a &lt;nodes count=...&gt; specification. */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log,
hasZooKeeper(containerElement));
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Allocates all hosts of the node type given by the 'type' attribute of &lt;nodes&gt;. */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), log);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Places this container cluster on the hosts of the content cluster referenced by &lt;nodes of=...&gt;. */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
// Re-wrap to attribute the failure to this cluster in the error message
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Creates and initializes one container per allocated host, named "container.&lt;index&gt;". */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> containers = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer container = new ApplicationContainer(cluster,
                                                                  "container." + membership.index(),
                                                                  membership.retired(),
                                                                  membership.index(),
                                                                  deployState);
        container.setHostResource(host);
        container.initService(deployState);
        containers.add(container);
    });
    return containers;
}
/** Builds one container per explicit &lt;node&gt; element, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>();
    for (int index = 0; index < nodeElements.size(); index++) {
        String id = "container." + index;
        nodes.add(new ContainerServiceBuilder(id, index).build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/** Returns whether the cpu-socket-affinity attribute is present and set to true. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the cluster-level JVM options to every container that has no options of its own. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute from &lt;nodes&gt; to every container, when present. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    for (Container container : containers) {
        container.setPreLoad(preload);
    }
}
/** Adds the search handler plus the execution factory it requires, using configured or default bindings. */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
cluster.addComponent(
new SearchHandler(
cluster,
serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
}
/** Adds the query-builder GUI handler on its system binding path. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
cluster.addComponent(guiHandler);
}
/** Returns the user-declared &lt;binding&gt; patterns, or the given defaults when none are declared. */
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
    List<Element> declaredBindings = XML.getChildren(searchElement, "binding");
    return declaredBindings.isEmpty() ? List.of(defaultBindings)
                                      : toBindingList(declaredBindings);
}
/** Converts binding elements to patterns, skipping elements whose trimmed text content is empty. */
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
    return bindingElements.stream()
                          .map(element -> element.getTextContent().trim())
                          .filter(text -> ! text.isEmpty())
                          .map(UserBindingPattern::fromPattern)
                          .collect(Collectors.toList());
}
/** Builds the document-api model from the &lt;document-api&gt; element, or returns null when absent. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
/** Builds the docproc model from the &lt;document-processing&gt; element, or returns null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
/** Expands &lt;include&gt; directives under the given element; requires an application package to resolve them. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports the child elements of every services file found in the referenced include directory into parentElement. */
private void addInclude(Element parentElement, Element include) {
String dirName = include.getAttribute(IncludeDirs.DIR);
app.validateIncludeDir(dirName);
List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
for (Element includedFile : includedFiles) {
List<Element> includedSubElements = XML.getChildren(includedFile);
for (Element includedSubElement : includedSubElements) {
// importNode is required because the included element belongs to a different DOM document
Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
parentElement.appendChild(copiedNode);
}
}
}
/** Registers every child element with the given name as a configured component on the cluster. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    XML.getChildren(spec, componentName)
       .forEach(node -> cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)));
}
/** Like addConfiguredComponents, but runs the validator (which may throw) on each element first. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds the Athenz identity provider component and identity properties when deployment.xml
 * declares an Athenz domain; fails if a domain is declared without a resolvable service.
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
// Resolution order: instance+zone specific service, then the spec-wide default
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain,
service,
getLoadBalancerName(loadBalancerName, configServerSpecs),
ztsUrl,
zoneDnsSuffix,
zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, or the first config server's host name ("unknown" if none) as fallback. */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns whether the cluster element declares a &lt;zookeeper&gt; child. */
private static boolean hasZooKeeper(Element spec) {
    Element zookeeperElement = XML.getChild(spec, "zookeeper");
    return zookeeperElement != null;
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    boolean reserved = id.equals(xmlRendererId) || id.equals(jsonRendererId);
    if (reserved) {
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
    }
}
/** Returns whether the element is a container cluster declaration. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName);
}
/**
 * Validates JVM options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system.
 */
private static class JvmOptions {

    // Accepted option syntax. The character class is now explicit: the previous '[a-zA-z...]'
    // range accidentally also matched '[', '\', ']', '^' and '`'. '_' is listed explicitly
    // since the old (buggy) range accepted it and e.g. -D properties may contain it.
    private static final Pattern validPattern = Pattern.compile("-[a-zA-Z0-9=:./,+_-]+");
    // The debug agent must never be enabled in hosted Vespa.
    // (Field renamed from the misspelled 'invalidInHostedatttern'.)
    private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");

    private final ContainerCluster<?> cluster;
    private final Element nodesElement;
    private final DeployLogger logger;
    private final boolean legacyOptions;
    private final boolean isHosted;

    public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) {
        this.cluster = cluster;
        this.nodesElement = nodesElement;
        this.logger = deployState.getDeployLogger();
        this.legacyOptions = legacyOptions;
        this.isHosted = deployState.isHosted();
    }

    /** Returns the validated options string from the <jvm> element (or legacy attribute), or "" when not configured. */
    String build() {
        if (legacyOptions)
            return buildLegacyOptions();

        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null) return "";
        String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
        if (jvmOptions.isEmpty()) return "";
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    /** Returns options from the deprecated 'jvm-options' attribute on <nodes>, warning about its use. */
    // NOTE(review): the deprecation-warning string literal below appears truncated in this copy — verify full text in VCS
    String buildLegacyOptions() {
        String jvmOptions = null;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            if (! jvmOptions.isEmpty())
                logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 8." +
                        " Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
                        " See https:
        }

        validateJvmOptions(jvmOptions);

        return jvmOptions;
    }

    // Logs a warning (self-hosted) or fails deployment (hosted) for invalid or disallowed options.
    private void validateJvmOptions(String jvmOptions) {
        if (jvmOptions == null || jvmOptions.isEmpty()) return;

        String[] optionList = jvmOptions.split(" ");
        List<String> invalidOptions = Arrays.stream(optionList)
                .filter(option -> !option.isEmpty())
                .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                .sorted()
                .collect(Collectors.toList());
        if (isHosted)
            invalidOptions.addAll(Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
                    .sorted()
                    .collect(Collectors.toList()));

        if (invalidOptions.isEmpty()) return;

        String message = "Invalid or misplaced JVM options in services.xml: " +
                String.join(",", invalidOptions) + "." +
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {

// NOTE(review): '[a-zA-z0-9=]' uses the range A-z, which also matches '[', '\', ']', '^', '_'
// and '`'; this was probably meant to be '[a-zA-Z0-9=]'. Confirm before tightening, since a fix
// would reject some previously accepted option strings.
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
// Matches CMS-specific -XX flags, which are unsupported on the hosted JDK
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
// The GC options as configured in services.xml; may be null
private final String jvmGcOptions;
private final DeployLogger logger;
private final boolean isHosted;
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
/**
 * Returns the GC options to use: the configured ones (validated) when given,
 * otherwise the deploy-property default, otherwise a platform default.
 */
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
// NOTE(review): Pattern.matches(validPattern.pattern(), ...) recompiles the regex per option
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.collect(Collectors.toList());
if (isHosted) {
// CMS is unavailable on the hosted JDK, so reject CMS-related flags there
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidCMSPattern.pattern(), option) ||
option.equals("-XX:+UseConcMarkSweepGC"))
.collect(Collectors.toList()));
}
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
// Fails deployment when hosted, logs a warning otherwise
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
Collections.sort(options);
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
} |
Yes, thanks! | private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of")) {
List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
"replacement, and the feature will be removed in Vespa 9. Use separate container and " +
"content clusters instead");
return containers;
} else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
} | log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " + | private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of")) {
List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
log.log(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
"replacement, and the feature will be removed in Vespa 9. Use separate container and " +
"content clusters instead");
return containers;
} else if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container
 * @param networking whether the container should open network ports
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
// Standalone containers do not run the message bus rpc server
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids (the 'container' tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
/** Builds the container model (a single application container cluster) from the container element. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
// rpc/message bus is disabled for standalone containers; http follows the networking flag
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
/** Creates the cluster config producer through the dom builder machinery, rooted at the model's parent producer. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
// The producer id is used both as subId and name of the cluster
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the content of the container element.
 * NOTE(review): the call order below appears significant (e.g. default handlers are added
 * before status handlers, and nodes after http) — preserve it when modifying.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addEmbedderComponents(deployState, cluster, spec);
addModelEvaluation(spec, cluster, context);
addModelEvaluationBundles(cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addNodes(cluster, spec, context);
addServerProviders(deployState, spec, cluster);
addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
/**
 * Installs the AWS parameter store bundle on hosted Vespa, and in public systems
 * also a handler for validating secret store access.
 */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if (deployState.isHosted())
        cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));

    if ( ! deployState.zone().system().isPublic()) return;

    Handler<AbstractConfigProducer<?>> validationHandler = new Handler<>(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    validationHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(validationHandler);
}
/**
 * Configures cluster-managed ZooKeeper for this cluster when requested.
 * Not supported in combined clusters; requires an odd number of non-retired
 * nodes within [MIN_ZOOKEEPER_NODE_COUNT, MAX_ZOOKEEPER_NODE_COUNT].
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
if ( ! hasZooKeeper(spec)) return;
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
// Each container runs the reconfigurable ZooKeeper server components
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the reconfigurable ZooKeeper server components to the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String idSpec : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                 "com.yahoo.vespa.zookeeper.Reconfigurer",
                                 "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl"))
        container.addComponent(zookeeperComponent(idSpec, container));
}

/** Creates a component in the zookeeper-server bundle, configured with the container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/**
 * Reads the secret-store element, configuring either a cloud secret store
 * (type="cloud") or a plain group-based secret store.
 */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }

    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * Configures an AWS parameter store backed secret store. Applies only in hosted, public systems;
 * each store referenced in services.xml must already be declared as a tenant secret store with
 * an external id.
 */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
if ( ! deployState.isHosted()) return;
if ( ! cluster.getZone().system().isPublic())
throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
CloudSecretStore cloudSecretStore = new CloudSecretStore();
// Index the tenant's declared secret stores by name for lookup below
Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
.stream()
.collect(Collectors.toMap(
TenantSecretStore::getName,
store -> store
));
Element store = XML.getChild(secretStoreElement, "store");
for (Element group : XML.getChildren(store, "aws-parameter-store")) {
String account = group.getAttribute("account");
String region = group.getAttribute("aws-region");
TenantSecretStore secretStore = secretStoresByName.get(account);
if (secretStore == null)
throw new IllegalArgumentException("No configured secret store named " + account);
if (secretStore.getExternalId().isEmpty())
throw new IllegalArgumentException("No external ID has been set");
cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
}
cluster.addComponent(cloudSecretStore);
}
/**
 * Applies configuration derived from deployment.xml (hosted only): logs deprecation warnings,
 * adds the identity provider, and sets rotation properties on the containers.
 */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
if ( ! context.getDeployState().isHosted()) return;
Optional<DeploymentSpec> deploymentSpec = app.getDeployment().map(DeploymentSpec::fromXml);
if (deploymentSpec.isEmpty()) return;
// Surface deprecated deployment.xml elements to the deployer
for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) {
deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec.get());
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec.get());
}
/** Sets the rotation names and active-rotation flag on each container of the cluster. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
/** Returns whether this instance declares the given zone with an active rotation in its deployment spec. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
if (instance.isEmpty()) return false;
return instance.get().zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
/** Sets the "rotations" property on the container to the global endpoint names of its cluster. */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> rotationNames = new LinkedHashSet<>();
    for (ContainerEndpoint endpoint : endpoints) {
        if ( ! endpoint.clusterId().equals(containerClusterName)) continue;
        if (endpoint.scope() != ApplicationClusterEndpoint.Scope.global) continue;
        rotationNames.addAll(endpoint.names());
    }
    container.setProp("rotations", String.join(",", rotationNames));
}
/** Transforms each embedder element into a component definition and adds it to the cluster. */
private static void addEmbedderComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element node : XML.getChildren(spec, "embedder")) {
Element transformed = EmbedderConfig.transform(deployState, node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, transformed));
}
}
/** Adds components declared under any components elements, and directly under the container element. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds the status handler: on hosted Vespa a file-backed status.html handler (the file location
 * may be overridden through the VESPA_LB_STATUS_FILE environment variable), otherwise the
 * standard VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
/** Adds components declared through server elements. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Adds access logging components. Hosted applications may not override the accesslog element
 * (their elements are discarded with a warning); a default access log may be added when none
 * is configured, and a connection log is added whenever any access log component is present.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
if (cluster.isHostedVespa() && !accessLogElements.isEmpty()) {
accessLogElements.clear();
log.logApplicationPackage(
Level.WARNING, "Applications are not allowed to override the 'accesslog' element");
} else {
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
// The connection log cluster name differs between hosted/Vespa 8 ("access") and older self-hosted ("qrs")
if (cluster.isHostedVespa() || deployState.getVespaVersion().getMajor() == 8) {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
} else {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
}
}
}
/** Returns all accesslog child elements of the given element. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the http model from the http element, if any, and adds the implicit http setup
 * (server, access control, hosted connectors) required for hosted tenant applications.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null)
        cluster.setHttp(buildHttp(deployState, cluster, httpElement));

    if ( ! isHostedTenantApplication(context)) return;

    addHostedImplicitHttpIfNotPresent(deployState, cluster);
    addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
    addDefaultConnectorHostedFilterBinding(cluster);
    addAdditionalHostedConnector(deployState, cluster, context);
}
/**
 * Lets any configured access control filter chain also cover requests arriving
 * on the default hosted connector.
 */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    // Fixed: removed a stray empty statement (a duplicated ';') at the end of this call chain
    cluster.getHttp().getAccessControl()
           .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the TLS connector used by hosted Vespa: with the provisioned endpoint certificate when
 * available (optionally authorizing clients against security/clients.pem in public systems),
 * otherwise with the default certificate and truststore.
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
HostedSslConnectorFactory connectorFactory;
Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
boolean proxyProtocolMixedMode = deployState.getProperties().featureFlags().enableProxyProtocolMixedMode();
if (deployState.endpointCertificateSecrets().isPresent()) {
// In public systems clients must present a certificate signed by the application's client CA
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
// Whether access control demands client auth already during the TLS handshake
boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, proxyProtocolMixedMode)
: HostedSslConnectorFactory.withProvidedCertificate(
serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, proxyProtocolMixedMode);
} else {
connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, proxyProtocolMixedMode);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/**
 * Returns trusted certificates as a PEM encoded string containing the concatenation of
 * trusted certs from the application package and all operator certificates.
 */
String getTlsClientAuthorities(DeployState deployState) {
    List<X509Certificate> certificates = new ArrayList<>(deployState.tlsClientAuthority()
                                                                    .map(X509CertificateUtils::certificateListFromPem)
                                                                    .orElse(Collections.emptyList()));
    certificates.addAll(deployState.getProperties().operatorCertificates());
    return X509CertificateUtils.toPem(certificates);
}
/** Returns whether this is a hosted, default-type application, excluding tester instances. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    var deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return false;
    if (context.getApplicationType() != ApplicationType.DEFAULT) return false;
    return ! deployState.getProperties().applicationId().instance().isTester();
}
/**
 * Ensures hosted clusters have an http model with a Jetty server and a connector on the
 * default Vespa web service port, creating any missing pieces.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
// Only add the default connector when no connector already listens on that port
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
/**
 * Adds implicit Athenz access control (requiring client auth) for hosted applications with an
 * Athenz domain, unless access control is already configured.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
Http http = cluster.getHttp();
if (http.getAccessControl().isPresent()) return;
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
if (tenantDomain == null) return;
new AccessControl.Builder(tenantDomain.value())
.setHandlers(cluster)
.clientAuthentication(AccessControl.ClientAuthentication.need)
.build()
.configureHttpFilterChains(http);
}
/** Builds the http model from the given http element, removing all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(deployState, cluster, httpElement);
    if (networking == Networking.disable)
        http.removeAllServers();
    return http;
}

/** Builds and assigns the document api model, if the document-api element is present. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Builds and assigns the docproc model, if present, and propagates its message bus tuning options. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Builds and assigns the search model (chains, handlers, renderers) from the search element, if present. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Enables stateless model evaluation from the model-evaluation element, applying any
 * per-ONNX-model execution overrides (execution mode, inter/intra-op thread counts).
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null)
// Overrides for unknown models are silently ignored
continue;
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Returns the text content of the named child element, or the default when the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}

/** Returns the text content of the named child element parsed as an int, or the default when absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/** Adds the model-evaluation platform bundles, which are always available to application code. */
protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
* declared 'model-evaluation' in services.xml, because there are many public API packages
* in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
}
/** Builds and assigns the processing chains from the processing element, if present. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the container search model: search chains plus query profiles, semantic rules and page templates. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
/** Validates and applies the page templates found in the application package. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds each user-declared handler element as a component of the cluster. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement));
}

/** Throws IllegalArgumentException unless the container element's version attribute is 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds the nodes of this cluster: a single standalone node, or nodes as given in services.xml. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}

/** Adds a single container named "standalone" to the cluster. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    ApplicationContainer standalone = new ApplicationContainer(cluster, "standalone", index, deployState);
    cluster.addContainers(Collections.singleton(standalone));
}

/** Builds (and validates) the GC options to use, applying defaults when none are configured. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
    return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
/** Builds (and validates) the JVM options configured in the given nodes element. */
private static String getJvmOptions(ApplicationContainerCluster cluster,
                                    Element nodesElement,
                                    DeployState deployState,
                                    boolean legacyOptions) {
    return new JvmOptions(cluster, nodesElement, deployState, legacyOptions).build();
}

/** Returns the value of the given attribute, or null when the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/**
 * Extracts JVM settings for the nodes: from the jvm element when present,
 * otherwise from the legacy attributes on the nodes element.
 */
private void extractJvmOptions(List<ApplicationContainer> nodes,
ApplicationContainerCluster cluster,
Element nodesElement,
ConfigModelContext context) {
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
}
}
/** Applies JVM args, GC options and memory percentage from the deprecated attributes on the nodes element. */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), true));
// Only fall back to the attribute when GC options were not already set
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
/** Applies JVM args, memory percentage and GC options from the jvm element. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), false));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
/**
* Add nodes to cluster according to the given containerElement.
*
* Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
* of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
* simultaneously for all active config models.
*/
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No nodes element: allocate a default set of nodes
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (!environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Returns the child elements of the given element as space-separated NAME=value pairs,
 * with a trailing space per entry; "" when the element is null.
 */
private static String getEnvironmentVariables(Element environmentVariables) {
    if (environmentVariables == null) return "";
    StringBuilder vars = new StringBuilder();
    for (Element var : XML.getChildren(environmentVariables))
        vars.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
    return vars.toString();
}
/**
 * Parses and applies the JVM heap memory percentage setting for the cluster's nodes.
 * The value must be an integer percentage ending with '%'; null/empty values are ignored.
 *
 * @throws IllegalArgumentException if the value is present but malformed
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    // Fixed: the identical error message was previously duplicated in both failure branches
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Fixed: preserve the parse failure as the cause instead of dropping it
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/**
 * Allocates a container cluster declared without a nodes tag.
 * Hosted: requests 2 nodes in production zones (1 otherwise) with unspecified resources.
 * Self-hosted: falls back to a single-node cluster on the default container host.
 */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if (deployState.isHosted()) {
        ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                      ClusterSpec.Id.from(cluster.getName()))
                .vespaVersion(deployState.getWantedNodeVespaVersion())
                .dockerImageRepository(deployState.getWantedDockerImageRepo())
                .build();
        int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
        deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount +
                                                                        " nodes in " + cluster);
        // required=false; allocation may fail except during bootstrap deployments
        Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
                                          false,
                                          !deployState.getProperties().isBootstrap());
        var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    else {
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    }
}
/** Creates a one-node cluster ("container.0") on the given host — the self-hosted default. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
    container.setHostResource(host);
    container.initService(context.getDeployState());
    return List.of(container);
}
/**
 * Provisions hosts from the node count/resources declared in the nodes element and
 * creates one container per allocated host. ZooKeeper presence is passed to provisioning.
 */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                              ClusterSpec.Type.container,
                                                                              ClusterSpec.Id.from(cluster.getName()),
                                                                              log,
                                                                              hasZooKeeper(containerElement));
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/**
 * Allocates all hosts of the node type named by the 'type' attribute and creates one
 * container per allocated host.
 */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
            .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
            .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
            .build();
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
                                                         Capacity.fromRequiredNodeType(type), log);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/**
 * Sets up a 'combined' cluster: containers are placed on the hosts of the content
 * cluster referenced by the 'of' attribute of the nodes element.
 */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    } catch (IllegalArgumentException e) {
        // Rewrap to point at the referring cluster while keeping the original cause
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodeSpecification,
                                        referenceId,
                                        cluster.getRoot().hostSystem(),
                                        context.getDeployLogger());
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Wraps each allocated host in an initialized ApplicationContainer indexed by its cluster membership. */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> containers = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer container = new ApplicationContainer(cluster,
                                                                  "container." + membership.index(),
                                                                  membership.retired(),
                                                                  membership.index(),
                                                                  deployState);
        container.setHostResource(host);
        container.initService(deployState);
        containers.add(container);
    });
    return containers;
}
/** Builds one container per explicit node child element, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<ApplicationContainer> containers = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        containers.add(new ContainerServiceBuilder("container." + index, index)
                               .build(deployState, cluster, nodeElements.get(index)));
    }
    return containers;
}
/** Whether the nodes element requests CPU socket affinity; defaults to false when the attribute is absent. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
            && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the nodes-tag JVM arguments to every container that has no explicitly assigned options. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute of the nodes element to every container, when present. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    for (Container container : containers)
        container.setPreLoad(preload);
}
/**
 * Adds the search chain execution factory and the search handler, bound to the default
 * search binding unless overridden by binding children of the search element.
 */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
    cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
                                                 "com.yahoo.search.searchchain.ExecutionFactory"));
    cluster.addComponent(
            new SearchHandler(
                    cluster,
                    serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
                    ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
}
/** Registers the query-builder GUI handler on its system binding path. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
    Handler<?> handler = new GUIHandler();
    handler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
    cluster.addComponent(handler);
}
/** Returns the binding children of the search element, or the given defaults when none are declared. */
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
    List<Element> declaredBindings = XML.getChildren(searchElement, "binding");
    return declaredBindings.isEmpty() ? List.of(defaultBindings) : toBindingList(declaredBindings);
}
/** Converts binding elements to user binding patterns, skipping elements with blank content. */
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if ( ! pattern.isEmpty())
            patterns.add(UserBindingPattern.fromPattern(pattern));
    }
    return patterns;
}
/** Builds the document API from the document-api element, or returns null when it is absent. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/** Builds document processing from the document-processing element, or returns null when it is absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    return new ContainerDocproc(cluster, chains, DocprocOptionsBuilder.build(docprocElement), !standaloneBuilder);
}
/** Expands include directives under the given element; requires an application package to be set. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    for (Element include : includes)
        addInclude(parentElement, include);
}
/**
 * Inlines the content of all XML files in the included directory: each child element of
 * each file is imported into this document and appended under parentElement.
 */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
    for (Element includedFile : includedFiles) {
        List<Element> includedSubElements = XML.getChildren(includedFile);
        for (Element includedSubElement : includedSubElements) {
            // importNode is required to copy nodes across owner documents
            Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
            parentElement.appendChild(copiedNode);
        }
    }
}
/** Adds a component to the cluster for each child of spec having the given element name. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    for (Element componentElement : XML.getChildren(spec, componentName))
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
}
/** Validates each matching child element with the given validator, then adds it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // fail fast on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds the Athenz identity provider component when the deployment spec declares an
 * Athenz domain, and records the identity domain/service on every container.
 *
 * @throws IllegalArgumentException if a domain is declared but no Athenz service is
 *         configured for this instance/zone
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain()
        .ifPresent(domain -> {
            // Zone/region-specific service wins over the deployment-wide default
            AthenzService service = spec.instance(app.getApplicationId().instance())
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> spec.athenzService())
                    .orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
                                                                    app.getApplicationId().instance() + "'"));
            String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
            IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                     service,
                                                                     getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                     ztsUrl,
                                                                     zoneDnsSuffix,
                                                                     zone);
            cluster.addComponent(identityProvider);
            cluster.getContainers().forEach(container -> {
                container.setProp("identity.domain", domain.value());
                container.setProp("identity.service", service.value());
            });
        });
}
/** Returns the given load balancer name, falling back to the first config server's hostname, or "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Whether the spec declares a zookeeper element. */
private static boolean hasZooKeeper(Element spec) {
    Element zooKeeperElement = XML.getChild(spec, "zookeeper");
    return zooKeeperElement != null;
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — those ids are reserved for the built-ins. */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (xmlRendererId.equals(id) || jsonRendererId.equals(id))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Whether the given element is a container element. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName);
}
/**
 * Validates JVM options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system.
 */
private static class JvmOptions {

    // NOTE(review): the range 'A-z' (instead of 'A-Z') also matches '[', '\', ']', '^', '_' and '`' — confirm intent
    private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+-]+");
    // Remote debug-agent options are rejected on hosted. NOTE(review): field name has a typo ("atttern")
    private static final Pattern invalidInHostedatttern = Pattern.compile("-Xrunjdwp:transport=.*");

    private final ContainerCluster<?> cluster;
    private final Element nodesElement;
    private final DeployLogger logger;
    // When true, options are read from the deprecated 'jvm-options' attribute instead of the jvm element
    private final boolean legacyOptions;
    // Hosted deployments fail hard on invalid options; self-hosted deployments only log a warning
    private final boolean isHosted;

    public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) {
        this.cluster = cluster;
        this.nodesElement = nodesElement;
        this.logger = deployState.getDeployLogger();
        this.legacyOptions = legacyOptions;
        this.isHosted = deployState.isHosted();
    }

    /** Returns the validated options string from the jvm element (or the legacy attribute), or "" when none. */
    String build() {
        if (legacyOptions)
            return buildLegacyOptions();
        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null) return "";
        String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
        if (jvmOptions.isEmpty()) return "";
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    /** Reads the deprecated 'jvm-options' attribute, warning about its removal. May return null. */
    String buildLegacyOptions() {
        String jvmOptions = null;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            if (! jvmOptions.isEmpty())
                logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 8." +
                        " Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
                        // NOTE(review): the string literal below appears truncated in this copy of the file
                        " See https:
        }
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    // Splits on single spaces and collects options that fail the syntax pattern (plus, on
    // hosted, debug-agent options); then logs (self-hosted) or throws (hosted).
    private void validateJvmOptions(String jvmOptions) {
        if (jvmOptions == null || jvmOptions.isEmpty()) return;
        String[] optionList = jvmOptions.split(" ");
        List<String> invalidOptions = Arrays.stream(optionList)
                .filter(option -> !option.isEmpty())
                .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                .sorted()
                .collect(Collectors.toList());
        if (isHosted)
            invalidOptions.addAll(Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> Pattern.matches(invalidInHostedatttern.pattern(), option))
                    .sorted()
                    .collect(Collectors.toList()));
        if (invalidOptions.isEmpty()) return;
        String message = "Invalid or misplaced JVM options in services.xml: " +
                String.join(",", invalidOptions) + "." +
                // NOTE(review): the string literal below appears truncated in this copy of the file
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
/**
 * Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system
 * (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
 */
private static class JvmGcOptions {

    // NOTE(review): the range 'A-z' (instead of 'A-Z') also matches '[', '\', ']', '^', '_' and '`' — confirm intent
    private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
    // CMS-specific flags are rejected on hosted Vespa
    private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");

    private final DeployState deployState;
    // GC options from services.xml; null means fall back to deployment properties
    private final String jvmGcOptions;
    private final DeployLogger logger;
    // Hosted deployments fail hard on invalid options; self-hosted deployments only log a warning
    private final boolean isHosted;

    public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
        this.deployState = deployState;
        this.jvmGcOptions = jvmGcOptions;
        this.logger = deployState.getDeployLogger();
        this.isHosted = deployState.isHosted();
    }

    // Returns the effective GC options: services.xml options (validated) take precedence over
    // deployment properties; when both are empty, a per-environment GC default is used.
    private String build() {
        String options = deployState.getProperties().jvmGCOptions();
        if (jvmGcOptions != null) {
            options = jvmGcOptions;
            String[] optionList = options.split(" ");
            List<String> invalidOptions = Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                    .collect(Collectors.toList());
            if (isHosted) {
                invalidOptions.addAll(Arrays.stream(optionList)
                        .filter(option -> !option.isEmpty())
                        .filter(option -> Pattern.matches(invalidCMSPattern.pattern(), option) ||
                                option.equals("-XX:+UseConcMarkSweepGC"))
                        .collect(Collectors.toList()));
            }
            logOrFailInvalidOptions(invalidOptions);
        }
        if (options == null || options.isEmpty())
            options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
        return options;
    }

    // Logs (self-hosted) or throws (hosted) for the given invalid options, sorted for stable output.
    private void logOrFailInvalidOptions(List<String> options) {
        if (options.isEmpty()) return;
        Collections.sort(options);
        String message = "Invalid or misplaced JVM GC options in services.xml: " +
                String.join(",", options) + "." +
                // NOTE(review): the string literal below appears truncated in this copy of the file
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Status file served to hosted load balancers (see addStatusHandlers)
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Environment variable that may override the status file location
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
// A cluster with ZooKeeper must have an odd node count within these bounds (see addZooKeeper)
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;

/** Whether the built cluster should have http networking enabled. */
public enum Networking { disable, enable }

private ApplicationPackage app;             // set in doBuild
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;     // disabled for standalone builds
private final boolean httpServerEnabled;    // enabled when networking == enable
protected DeployLogger log;                 // set in doBuild

public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG));

// Renderer ids reserved for the built-in renderers (see validateRendererElement)
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container (disables the rpc server)
 * @param networking        whether http networking should be enabled for the built cluster
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    this.rpcServerEnabled = !standaloneBuilder;
    this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids (the container tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/** Builds the container cluster from the given container element and attaches it to the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    log = modelContext.getDeployLogger();
    app = modelContext.getApplicationPackage();
    checkVersion(spec);
    ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    cluster.setMessageBusEnabled(rpcServerEnabled);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/** Creates the cluster config producer through the standard dom builder so producer wiring is set up. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
        @Override
        protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
            return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
                                                   modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the container element: components, stores, search/docproc/
 * document-api models, handlers, http, access logs, nodes and ZooKeeper.
 * NOTE(review): preserve the call order — later steps (e.g. addZooKeeper) read state
 * set up by earlier ones (e.g. addNodes); confirm before reordering.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec, deployState);
    addEmbedderComponents(deployState, cluster, spec);
    addModelEvaluation(spec, cluster, context);
    addModelEvaluationBundles(cluster);
    addProcessing(deployState, spec, cluster);
    addSearch(deployState, spec, cluster);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(spec, cluster);
    cluster.addDefaultHandlersExceptStatus();
    addStatusHandlers(cluster, context.getDeployState().isHosted());
    addUserHandlers(deployState, cluster, spec);
    addHttp(deployState, spec, cluster, context);
    addAccessLogs(deployState, cluster, spec);
    addNodes(cluster, spec, context);
    addServerProviders(deployState, spec, cluster);
    addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
    addZooKeeper(cluster, spec);
    addParameterStoreValidationHandler(cluster, deployState);
}
/**
 * Adds the AWS cloud bundle on hosted deployments, and — in public systems — a handler
 * for validating secret store settings at /validate-secret-store.
 */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if (deployState.isHosted()) {
        cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    }
    if (deployState.zone().system().isPublic()) {
        Handler<AbstractConfigProducer<?>> validationHandler = new Handler<>(
                new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
        validationHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
        cluster.addComponent(validationHandler);
    }
}
/**
 * Configures an embedded ZooKeeper cluster when a zookeeper element is declared.
 * Disallowed for combined clusters; requires an odd number of non-retired nodes
 * between MIN_ZOOKEEPER_NODE_COUNT and MAX_ZOOKEEPER_NODE_COUNT.
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    if ( ! hasZooKeeper(spec)) return;
    Element nodesElement = XML.getChild(spec, "nodes");
    boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
    if (isCombined) {
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
    }
    long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
    if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
        throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
                                           ", have " + nonRetiredNodes + " non-retired");
    }
    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    // Each container runs a reconfigurable ZooKeeper server
    cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the components needed to run a reconfigurable embedded ZooKeeper server on the container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String idSpec : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                 "com.yahoo.vespa.zookeeper.Reconfigurer",
                                 "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl")) {
        container.addComponent(zookeeperComponent(idSpec, container));
    }
}
/** Creates a zookeeper-server bundle component scoped to the given container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/**
 * Configures the secret store from the secret-store element: type "cloud" sets up a
 * cloud secret store, any other type reads group children into a plain SecretStore.
 */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement != null) {
        String type = secretStoreElement.getAttribute("type");
        if ("cloud".equals(type)) {
            addCloudSecretStore(cluster, secretStoreElement, deployState);
        } else {
            SecretStore secretStore = new SecretStore();
            for (Element group : XML.getChildren(secretStoreElement, "group")) {
                secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
            }
            cluster.setSecretStore(secretStore);
        }
    }
}
/**
 * Adds a CloudSecretStore component configured from aws-parameter-store entries.
 * Hosted-only (silently skipped otherwise) and restricted to public systems; every
 * referenced account must match a configured tenant secret store with an external id.
 */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
            .stream()
            .collect(Collectors.toMap(
                    TenantSecretStore::getName,
                    store -> store
            ));
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        String account = group.getAttribute("account");
        String region = group.getAttribute("aws-region");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        if (secretStore.getExternalId().isEmpty())
            throw new IllegalArgumentException("No external ID has been set");
        cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
    }
    cluster.addComponent(cloudSecretStore);
}
/**
 * Applies deployment.xml-derived configuration (hosted only): warns about deprecated
 * elements, then sets up the identity provider and rotation properties.
 */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
    if ( ! context.getDeployState().isHosted()) return;
    Optional<DeploymentSpec> deploymentSpec = app.getDeployment().map(DeploymentSpec::fromXml);
    if (deploymentSpec.isEmpty()) return;
    for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) {
        deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
    }
    addIdentityProvider(cluster,
                        context.getDeployState().getProperties().configServerSpecs(),
                        context.getDeployState().getProperties().loadBalancerName(),
                        context.getDeployState().getProperties().ztsUrl(),
                        context.getDeployState().getProperties().athenzDnsSuffix(),
                        context.getDeployState().zone(),
                        deploymentSpec.get());
    addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec.get());
}
/** Sets the rotation-related service properties on every container in the cluster. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
    String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec));
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", activeRotation);
    }
}
/** Whether the deployment spec declares this zone as active for the current instance. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.instance(app.getApplicationId().instance())
               .map(instance -> instance.zones().stream()
                                        .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
                                                                  declaredZone.active()))
               .orElse(false);
}
/**
 * Sets the "rotations" service property on the container to the comma-separated names
 * of the cluster's global-scope endpoints.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    var rotationsProperty = endpoints.stream()
            .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
            .filter(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.global)
            .flatMap(endpoint -> endpoint.names().stream())
            // LinkedHashSet deduplicates while preserving encounter order;
            // constructor reference replaces the verbose `() -> new LinkedHashSet<>()` lambda
            .collect(Collectors.toCollection(LinkedHashSet::new));
    container.setProp("rotations", String.join(",", rotationsProperty));
}
/** Transforms each embedder element into component configuration and adds it to the cluster. */
private static void addEmbedderComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element embedderElement : XML.getChildren(spec, "embedder")) {
        Element componentElement = EmbedderConfig.transform(deployState, embedderElement);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Adds component children declared directly under spec and under any components group (with includes expanded). */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element componentsElement : XML.getChildren(spec, "components")) {
        addIncludes(componentsElement);
        addConfiguredComponents(deployState, cluster, componentsElement, "component");
    }
    addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Hosted: serves a status file (location overridable via VESPA_LB_STATUS_FILE) at
 * /status.html for load balancers. Self-hosted: adds the standard VIP handler instead.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
    if (isHostedVespa) {
        String name = "status.html";
        Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
        cluster.addComponent(
                new FileStatusHandlerComponent(
                        name + "-status-handler",
                        statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                        SystemBindingPattern.fromHttpPath("/" + name)));
    } else {
        cluster.addVipHandler();
    }
}
/** Adds all server children of spec as server provider components. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging. Hosted applications may not override 'accesslog': such
 * elements are cleared with a warning, which also makes the default access log apply
 * below (the emptiness check runs after the clear). A connection log is added whenever
 * any access log component ends up on the cluster.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    if (cluster.isHostedVespa() && !accessLogElements.isEmpty()) {
        accessLogElements.clear();
        log.logApplicationPackage(
                Level.WARNING, "Applications are not allowed to override the 'accesslog' element");
    } else {
        for (Element accessLog : accessLogElements) {
            AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
        }
    }
    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
        cluster.addDefaultSearchAccessLog();
    if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
        // Connection log cluster name differs between hosted/Vespa 8 and older self-hosted
        if (cluster.isHostedVespa() || deployState.getVespaVersion().getMajor() == 8) {
            cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
        } else {
            cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
        }
    }
}
/** Returns all accesslog children of spec. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the http model from the http element, if present. Hosted tenant applications
 * additionally get an implicit http server, implicit access control and the hosted TLS
 * connector.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null) {
        cluster.setHttp(buildHttp(deployState, cluster, httpElement));
    }
    if (isHostedTenantApplication(context)) {
        addHostedImplicitHttpIfNotPresent(deployState, cluster);
        addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
        addDefaultConnectorHostedFilterBinding(cluster);
        addAdditionalHostedConnector(deployState, cluster, context);
    }
}
/** Lets any configured access control set up its filter binding on the default hosted connector. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    // Removed a stray empty statement ("; ;") that followed this call
    cluster.getHttp().getAccessControl()
           .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the additional hosted TLS connector to the cluster's http server, using provided
 * endpoint certificates when available and the default certificate/truststore otherwise.
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
    JettyHttpServer server = cluster.getHttp().getHttpServer().get();
    String serverName = server.getComponentId().getName();
    HostedSslConnectorFactory connectorFactory;
    Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
    boolean proxyProtocolMixedMode = deployState.getProperties().featureFlags().enableProxyProtocolMixedMode();
    if (deployState.endpointCertificateSecrets().isPresent()) {
        // Public systems also authorize clients, which requires a client CA in the application package
        boolean authorizeClient = deployState.zone().system().isPublic();
        if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
            // NOTE(review): the string literal below appears truncated in this copy of the file
            throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
                    "see: https:
        }
        EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
        boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
                .map(accessControl -> accessControl.clientAuthentication)
                .map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
                .orElse(false);
        connectorFactory = authorizeClient
                ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
                        serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, proxyProtocolMixedMode)
                : HostedSslConnectorFactory.withProvidedCertificate(
                        serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, proxyProtocolMixedMode);
    } else {
        connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, proxyProtocolMixedMode);
    }
    cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
    server.addConnector(connectorFactory);
}
/**
 * Returns trusted certificates as a PEM encoded string containing the concatenation of
 * trusted certs from the application package and all operator certificates.
 */
String getTlsClientAuthorities(DeployState deployState) {
    List<X509Certificate> certificates = new ArrayList<>(deployState.tlsClientAuthority()
                                                                    .map(X509CertificateUtils::certificateListFromPem)
                                                                    .orElse(Collections.emptyList()));
    certificates.addAll(deployState.getProperties().operatorCertificates());
    return X509CertificateUtils.toPem(certificates);
}
/** Whether this is a hosted, non-tester tenant application of the default application type. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return false;
    if (context.getApplicationType() != ApplicationType.DEFAULT) return false;
    return ! deployState.getProperties().applicationId().instance().isTester();
}
/**
 * Ensures the cluster has an http model with a server that has a connector on the
 * default Vespa web service port, creating each missing piece as needed.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null) {
        cluster.setHttp(new Http(new FilterChains(cluster)));
    }
    JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
    if (httpServer == null) {
        httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
        cluster.getHttp().setHttpServer(httpServer);
    }
    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
    if (!defaultConnectorPresent) {
        httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
    }
}
/**
 * Adds Athenz-domain-based access control (requiring client authentication) to the http
 * filter chains, unless access control is already configured or no tenant domain is set.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return; // already configured explicitly
    AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
    if (tenantDomain == null) return; // no domain — nothing to protect with
    new AccessControl.Builder(tenantDomain.value())
            .setHandlers(cluster)
            .clientAuthentication(AccessControl.ClientAuthentication.need)
            .build()
            .configureHttpFilterChains(http);
}
/** Builds the http model from the http element; all servers are removed when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(deployState, cluster, httpElement);
    if (networking == Networking.disable) {
        http.removeAllServers();
    }
    return http;
}
// Attaches the document API model to the cluster when <document-api> is declared.
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
// Builds the document-processing model from <document-processing>, if present, and
// configures the cluster's message bus parameters from the docproc options.
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
// Builds the search model from the <search> element, if present: search chains,
// the search and GUI handlers, and any configured renderers.
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement); // expand <include> directives before reading children
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Enables stateless model evaluation when <model-evaluation> is present, applying any
// per-model ONNX overrides (execution mode, inter-/intra-op threads) from services.xml.
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// NOTE(review): onnxElement may be null when <onnx> is absent — assumes XML.getChild
// tolerates a null parent and returns null; verify against the XML helper.
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null)
continue; // silently ignore overrides for unknown models
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
// Returns the text content of the named child element, or defaultValue when absent.
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
// Returns the integer content of the named child element, or defaultValue when absent.
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
// Adds the model-evaluation platform bundles to the cluster, unconditionally.
protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
* declared 'model-evaluation' in services.xml, because there are many public API packages
* in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
}
// Builds the processing chains model from the <processing> element, if present,
// plus any configured renderers.
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement); // expand <include> directives first
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Builds the container search model: search chains from the spec plus query profiles,
// semantic rules and page templates from the deploy state / application package.
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
// Validates and installs page templates from the application package.
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    var pageTemplates = PageTemplates.create(applicationPackage);
    containerSearch.setPageTemplates(pageTemplates);
}
// Adds each user-declared <handler> child of spec as a component on the cluster.
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
// Fails unless the container element declares version 1 (the only supported version).
private void checkVersion(Element spec) {
    String declaredVersion = spec.getAttribute("version");
    boolean isVersionOne = Version.fromString(declaredVersion).equals(new Version(1));
    if ( ! isVersionOne)
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + declaredVersion);
}
// Adds nodes to the cluster: a single implicit node when built by the standalone
// container, otherwise the nodes declared in services.xml.
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
        return;
    }
    addNodesFromXml(cluster, spec, context);
}
// Adds the single implicit node used when this model is built by the standalone container.
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
// Builds and validates the JVM GC options string for this deployment.
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
    JvmGcOptions gcOptions = new JvmGcOptions(context.getDeployState(), jvmGCOptions);
    return gcOptions.build();
}
// Builds and validates the JVM options configured under the given nodes element;
// legacyOptions selects the deprecated attribute form over the <jvm> tag.
private static String getJvmOptions(ApplicationContainerCluster cluster,
                                    Element nodesElement,
                                    DeployState deployState,
                                    boolean legacyOptions) {
    JvmOptions options = new JvmOptions(cluster, nodesElement, deployState, legacyOptions);
    return options.build();
}
// Returns the attribute value, or null when the attribute is absent
// (getAttribute alone would return "" for a missing attribute).
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
// Applies JVM settings to the given nodes: from the <jvm> tag when present,
// otherwise from the legacy attributes directly on <nodes>.
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null) {
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    } else {
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
    }
}
// Applies JVM settings from the legacy attributes on <nodes>:
// jvm-options, jvm-gc-options and allocated-memory.
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), true));
// Only derive GC options from the attribute if none are set on the cluster already
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
// Applies JVM settings from the <jvm> tag: options, allocated-memory and gc-options.
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), false));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
/**
* Add nodes to cluster according to the given containerElement.
*
* Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
* of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
* simultaneously for all active config models.
*/
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No <nodes> element: allocate implicitly (hosted) or use a single local node
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (!environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
// Serializes the children of the environment-variables element as "NAME=value " pairs.
// Each entry, including the last, is followed by a single space; a null element yields "".
private static String getEnvironmentVariables(Element environmentVariables) {
    if (environmentVariables == null) return "";
    StringBuilder serialized = new StringBuilder();
    for (Element variable : XML.getChildren(environmentVariables)) {
        serialized.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
    }
    return serialized.toString();
}
// Parses a memory percentage value (e.g. "60%") and applies it to the cluster.
// A null or empty value is ignored; anything else must be an integer followed by '%'.
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    // Single message for both failure modes, so the wording cannot drift apart
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException(errorMessage, e); // keep the parse failure as cause
    }
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
// Hosted: request a small implicit allocation — 2 nodes in production, 1 elsewhere
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepository(deployState.getWantedDockerImageRepo())
.build();
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount +
" nodes in " + cluster);
Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
else {
// Self-hosted: a single container on the single-node host
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
}
// Creates a one-container cluster on the given host (self-hosted single-node setup).
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
node.setHostResource(host);
node.initService(context.getDeployState());
return List.of(node);
}
// Provisions hosts from the count/resources given in the <nodes> element and creates
// one container per provisioned host.
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log,
hasZooKeeper(containerElement)); // whether a <zookeeper> element is declared
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
// Allocates all hosts of the node type named by the 'type' attribute on <nodes>
// and creates one container per allocated host.
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), log);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
// Creates containers on hosts provisioned through the content cluster referenced by
// the 'of' attribute on <nodes>; the reference id is recorded on this cluster.
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
// Re-throw with the cluster as context so the user can locate the bad reference
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
// Creates and initializes one ApplicationContainer per allocated host,
// named "container.<index>" from the host's cluster membership.
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        String id = "container." + membership.index();
        ApplicationContainer node = new ApplicationContainer(cluster, id, membership.retired(), membership.index(), deployState);
        node.setHostResource(host);
        node.initService(deployState);
        nodes.add(node);
    });
    return nodes;
}
// Creates one container per explicit <node> element, numbered in document order.
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        ContainerServiceBuilder builder = new ContainerServiceBuilder("container." + index, index);
        nodes.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
// Returns whether the nodes element requests CPU socket affinity.
// The hasAttribute guard decides presence; Boolean.parseBoolean handles the value
// (and returns false for anything but "true", case-insensitively).
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
// Prepends the nodes-level JVM arguments to every container without explicitly assigned options.
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
// Propagates the preload attribute from the nodes element, when set, to all containers.
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
        String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
        for (Container container : containers)
            container.setPreLoad(preload);
    }
}
// Registers the search execution factory and the search handler, bound either to the
// bindings declared under the search element or to the default search binding.
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
cluster.addComponent(
new SearchHandler(
cluster,
serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
}
// Registers the GUI handler on its system binding path.
private void addGUIHandler(ApplicationContainerCluster cluster) {
    Handler<?> handler = new GUIHandler();
    handler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
    cluster.addComponent(handler);
}
// Returns the bindings declared as <binding> children, or the given defaults when none exist.
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? List.of(defaultBindings) : toBindingList(bindingElements);
}
// Converts non-blank binding element texts into user binding patterns, preserving order.
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue; // skip empty <binding/> elements
        patterns.add(UserBindingPattern.fromPattern(pattern));
    }
    return patterns;
}
// Builds the document API model, or returns null when <document-api> is absent.
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
// Builds the document processing model from <document-processing>, or null when absent.
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement); // expand <include> directives before building chains
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
// Expands all <include> directives under parentElement in place.
// Requires an application package, since included files are resolved from it.
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null) {
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    }
    for (Element include : includes)
        addInclude(parentElement, include);
}
// Imports all top-level elements of every XML file in the referenced include
// directory as children of parentElement.
private void addInclude(Element parentElement, Element include) {
String dirName = include.getAttribute(IncludeDirs.DIR);
app.validateIncludeDir(dirName);
List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
for (Element includedFile : includedFiles) {
List<Element> includedSubElements = XML.getChildren(includedFile);
for (Element includedSubElement : includedSubElements) {
// importNode is required to copy nodes across document boundaries
Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
parentElement.appendChild(copiedNode);
}
}
}
// Adds every child element with the given name to the cluster as a configured component.
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        var component = new DomComponentBuilder().build(deployState, cluster, componentElement);
        cluster.addComponent(component);
    }
}
// Like addConfiguredComponents, but runs the given validator on each element before adding it.
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on invalid elements
        var component = new DomComponentBuilder().build(deployState, cluster, componentElement);
        cluster.addComponent(component);
    }
}
// Sets up the Athenz identity provider when the deployment spec declares an Athenz
// domain. The service is resolved per instance/zone, falling back to the spec-level
// service, and must exist when a domain is declared.
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain,
service,
getLoadBalancerName(loadBalancerName, configServerSpecs),
ztsUrl,
zoneDnsSuffix,
zone);
cluster.addComponent(identityProvider);
// Also expose the resolved identity on each container via service properties
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
// Returns the given load balancer name when set, else the first config server's
// host name, else "unknown".
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
// True when the container element declares a <zookeeper> child.
private static boolean hasZooKeeper(Element spec) {
    Element zookeeperElement = XML.getChild(spec, "zookeeper");
    return zookeeperElement != null;
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    boolean reserved = idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId);
    if (reserved)
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
// True when the element's tag name equals CONTAINER_TAG.
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName);
}
/**
* Validates JVM options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system.
*/
private static class JvmOptions {
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+-]+");
private static final Pattern invalidInHostedatttern = Pattern.compile("-Xrunjdwp:transport=.*");
private final ContainerCluster<?> cluster;
private final Element nodesElement;
private final DeployLogger logger;
private final boolean legacyOptions;
private final boolean isHosted;
public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.cluster = cluster;
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 8." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toList());
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedatttern.pattern(), option))
.sorted()
.collect(Collectors.toList()));
if (invalidOptions.isEmpty()) return;
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions;
private final DeployLogger logger;
private final boolean isHosted;
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.collect(Collectors.toList());
if (isHosted) {
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidCMSPattern.pattern(), option) ||
option.equals("-XX:+UseConcMarkSweepGC"))
.collect(Collectors.toList()));
}
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
Collections.sort(options);
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
} |
Consider extracting the timeout as a constant. | private void updateTargets(VersionStatus versionStatus, DeploymentStatusList deploymentStatuses, UpgradePolicy policy, OptionalInt targetMajorVersion) {
InstanceList instances = instances(deploymentStatuses);
InstanceList remaining = instances.with(policy);
Instant failureThreshold = controller().clock().instant().minus(Duration.ofDays(5));
Set<ApplicationId> failingRevision = InstanceList.from(deploymentStatuses.failingApplicationChangeSince(failureThreshold)).asSet();
List<Version> targetAndNewer = new ArrayList<>();
UnaryOperator<InstanceList> cancellationCriterion = policy == UpgradePolicy.canary ? i -> i.not().upgradingTo(targetAndNewer)
: i -> i.failing()
.not().upgradingTo(targetAndNewer);
Map<ApplicationId, Version> targets = new LinkedHashMap<>();
for (Version version : controller().applications().deploymentTrigger().targetsForPolicy(versionStatus, policy)) {
targetAndNewer.add(version);
InstanceList eligible = eligibleForVersion(remaining, version, targetMajorVersion);
InstanceList outdated = cancellationCriterion.apply(eligible);
cancelUpgradesOf(outdated.upgrading(), "Upgrading to outdated versions");
remaining = remaining.not().matching(eligible.asList()::contains)
.not().hasCompleted(Change.of(version));
for (ApplicationId id : outdated.and(eligible.not().upgrading()).not().changingRevision())
targets.put(id, version);
}
int numberToUpgrade = policy == UpgradePolicy.canary ? instances.size() : numberOfApplicationsToUpgrade();
for (ApplicationId id : instances.matching(targets.keySet()::contains).first(numberToUpgrade)) {
log.log(Level.INFO, "Triggering upgrade to " + targets.get(id) + " for " + id);
if (failingRevision.contains(id))
controller().applications().deploymentTrigger().cancelChange(id, ChangesToCancel.APPLICATION);
controller().applications().deploymentTrigger().triggerChange(id, Change.of(targets.get(id)));
}
} | Instant failureThreshold = controller().clock().instant().minus(Duration.ofDays(5)); | private void updateTargets(VersionStatus versionStatus, DeploymentStatusList deploymentStatuses, UpgradePolicy policy, OptionalInt targetMajorVersion) {
InstanceList instances = instances(deploymentStatuses);
InstanceList remaining = instances.with(policy);
Instant failureThreshold = controller().clock().instant().minus(DeploymentTrigger.maxFailingRevisionTime);
Set<ApplicationId> failingRevision = InstanceList.from(deploymentStatuses.failingApplicationChangeSince(failureThreshold)).asSet();
List<Version> targetAndNewer = new ArrayList<>();
UnaryOperator<InstanceList> cancellationCriterion = policy == UpgradePolicy.canary ? i -> i.not().upgradingTo(targetAndNewer)
: i -> i.failing()
.not().upgradingTo(targetAndNewer);
Map<ApplicationId, Version> targets = new LinkedHashMap<>();
for (Version version : controller().applications().deploymentTrigger().targetsForPolicy(versionStatus, policy)) {
targetAndNewer.add(version);
InstanceList eligible = eligibleForVersion(remaining, version, targetMajorVersion);
InstanceList outdated = cancellationCriterion.apply(eligible);
cancelUpgradesOf(outdated.upgrading(), "Upgrading to outdated versions");
remaining = remaining.not().matching(eligible.asList()::contains)
.not().hasCompleted(Change.of(version));
for (ApplicationId id : outdated.and(eligible.not().upgrading()))
targets.put(id, version);
}
int numberToUpgrade = policy == UpgradePolicy.canary ? instances.size() : numberOfApplicationsToUpgrade();
for (ApplicationId id : instances.matching(targets.keySet()::contains).first(numberToUpgrade)) {
log.log(Level.INFO, "Triggering upgrade to " + targets.get(id) + " for " + id);
if (failingRevision.contains(id))
controller().applications().deploymentTrigger().cancelChange(id, ChangesToCancel.APPLICATION);
controller().applications().deploymentTrigger().triggerChange(id, Change.of(targets.get(id)));
}
} | class Upgrader extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(Upgrader.class.getName());

// Shared curator storage for upgrade-rate and confidence-override state.
private final CuratorDb curator;
private final Random random;

public Upgrader(Controller controller, Duration interval) {
super(controller, interval);
this.curator = controller.curator();
// Seeded from the controller clock — presumably to vary instance shuffling between runs; verify
this.random = new Random(controller.clock().instant().toEpochMilli());
}
/**
* Schedule application upgrades. Note that this implementation must be idempotent.
*/
@Override
public double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
cancelBrokenUpgrades(versionStatus);
OptionalInt targetMajorVersion = targetMajorVersion();
DeploymentStatusList deploymentStatuses = deploymentStatuses(versionStatus);
// Each upgrade policy is processed independently against the same status snapshot
for (UpgradePolicy policy : UpgradePolicy.values())
updateTargets(versionStatus, deploymentStatuses, policy, targetMajorVersion);
return 1.0; // success factor
}
/** Returns deployment statuses of all readable applications with a project id, for the given version status. */
private DeploymentStatusList deploymentStatuses(VersionStatus versionStatus) {
    ApplicationList applications = ApplicationList.from(controller().applications().readable())
                                                  .withProjectId();
    return controller().jobController().deploymentStatuses(applications, versionStatus);
}
/** Returns a list of all production application instances, except those which are pinned, which we should not manipulate here. */
private InstanceList instances(DeploymentStatusList deploymentStatuses) {
return InstanceList.from(deploymentStatuses)
.withDeclaredJobs()
// Shuffle before ordering by deployed version — presumably to randomize order among
// instances on the same version; verify the ordering is stable
.shuffle(random)
.byIncreasingDeployedVersion()
.unpinned();
}
// Cancels in-flight upgrades to versions whose confidence is broken, except for
// canary-policy instances (canaries are how brokenness is discovered).
private void cancelBrokenUpgrades(VersionStatus versionStatus) {
    // Use the version status we were handed rather than re-reading it, so this method and
    // its caller act on the same snapshot
    InstanceList instances = instances(deploymentStatuses(versionStatus));
    for (VespaVersion version : versionStatus.versions()) {
        if (version.confidence() == Confidence.broken)
            cancelUpgradesOf(instances.upgradingTo(version.versionNumber()).not().with(UpgradePolicy.canary),
                             version.versionNumber() + " is broken");
    }
}
// Returns the instances that may upgrade to the given version: not already failing on it,
// allowed on and compatible with the version, not already done with it, currently on a
// lower version, and inside their upgrade window right now.
private InstanceList eligibleForVersion(InstanceList instances, Version version,
OptionalInt targetMajorVersion) {
Change change = Change.of(version);
return instances.not().failingOn(version)
.allowingMajorVersion(version.getMajor(), targetMajorVersion.orElse(version.getMajor()))
.compatibleWithPlatform(version, controller().applications()::versionCompatibility)
.not().hasCompleted(change)
.onLowerVersionThan(version)
.canUpgradeAt(version, controller().clock().instant());
}
/** Cancels the platform change of every unpinned instance in the given list, logging the reason. */
private void cancelUpgradesOf(InstanceList instances, String reason) {
    InstanceList unpinned = instances.unpinned();
    if (unpinned.isEmpty()) return;
    log.info("Cancelling upgrading of " + unpinned.asList() + " instances: " + reason);
    for (ApplicationId instance : unpinned.asList())
        controller().applications().deploymentTrigger().cancelChange(instance, PLATFORM);
}
/** Returns the number of applications to upgrade in this run */
private int numberOfApplicationsToUpgrade() {
// The maintenance interval is divided by the controller cluster size so all
// controllers together upgrade at the configured rate
return numberOfApplicationsToUpgrade(interval().dividedBy(Math.max(1, controller().curator().cluster().size())).toMillis(),
controller().clock().millis(),
upgradesPerMinute());
}
/** Returns the number of applications to upgrade in the interval containing now */
static int numberOfApplicationsToUpgrade(long intervalMillis, long nowMillis, double upgradesPerMinute) {
long intervalStart = Math.round(nowMillis / (double) intervalMillis) * intervalMillis;
double upgradesPerMilli = upgradesPerMinute / 60_000;
long upgradesAtStart = (long) (intervalStart * upgradesPerMilli);
long upgradesAtEnd = (long) ((intervalStart + intervalMillis) * upgradesPerMilli);
return (int) (upgradesAtEnd - upgradesAtStart);
}
/** Returns number of upgrades per minute */
public double upgradesPerMinute() {
return curator.readUpgradesPerMinute();
}
/** Sets the number of upgrades per minute */
public void setUpgradesPerMinute(double n) {
if (n < 0)
throw new IllegalArgumentException("Upgrades per minute must be >= 0, got " + n);
curator.writeUpgradesPerMinute(n);
}
/** Returns the target major version for applications not specifying one */
public OptionalInt targetMajorVersion() {
return controller().applications().targetMajorVersion();
}
/** Sets the default target major version. Set to empty to determine target version normally (by confidence) */
public void setTargetMajorVersion(Optional<Integer> targetMajorVersion) {
controller().applications().setTargetMajorVersion(targetMajorVersion);
}
/** Override confidence for given version. This will cause the computed confidence to be ignored */
public void overrideConfidence(Version version, Confidence confidence) {
if (confidence == Confidence.aborted && !version.isAfter(controller().readSystemVersion())) {
throw new IllegalArgumentException("Cannot override confidence to " + confidence +
" for version " + version.toFullString() +
": Version may be in use by applications");
}
try (Mutex lock = curator.lockConfidenceOverrides()) {
Map<Version, Confidence> overrides = new LinkedHashMap<>(curator.readConfidenceOverrides());
overrides.put(version, confidence);
curator.writeConfidenceOverrides(overrides);
}
}
/** Returns all confidence overrides */
public Map<Version, Confidence> confidenceOverrides() {
return curator.readConfidenceOverrides();
}
/** Remove confidence override for given version */
public void removeConfidenceOverride(Version version) {
controller().removeConfidenceOverride(version::equals);
}
} | class Upgrader extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(Upgrader.class.getName());
private final CuratorDb curator;
private final Random random;
public Upgrader(Controller controller, Duration interval) {
super(controller, interval);
this.curator = controller.curator();
this.random = new Random(controller.clock().instant().toEpochMilli());
}
/**
* Schedule application upgrades. Note that this implementation must be idempotent.
*/
@Override
public double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
cancelBrokenUpgrades(versionStatus);
OptionalInt targetMajorVersion = targetMajorVersion();
DeploymentStatusList deploymentStatuses = deploymentStatuses(versionStatus);
for (UpgradePolicy policy : UpgradePolicy.values())
updateTargets(versionStatus, deploymentStatuses, policy, targetMajorVersion);
return 1.0;
}
private DeploymentStatusList deploymentStatuses(VersionStatus versionStatus) {
return controller().jobController().deploymentStatuses(ApplicationList.from(controller().applications().readable())
.withProjectId(),
versionStatus);
}
/** Returns a list of all production application instances, except those which are pinned, which we should not manipulate here. */
private InstanceList instances(DeploymentStatusList deploymentStatuses) {
return InstanceList.from(deploymentStatuses)
.withDeclaredJobs()
.shuffle(random)
.byIncreasingDeployedVersion()
.unpinned();
}
private void cancelBrokenUpgrades(VersionStatus versionStatus) {
InstanceList instances = instances(deploymentStatuses(controller().readVersionStatus()));
for (VespaVersion version : versionStatus.versions()) {
if (version.confidence() == Confidence.broken)
cancelUpgradesOf(instances.upgradingTo(version.versionNumber()).not().with(UpgradePolicy.canary),
version.versionNumber() + " is broken");
}
}
private InstanceList eligibleForVersion(InstanceList instances, Version version,
OptionalInt targetMajorVersion) {
Change change = Change.of(version);
return instances.not().failingOn(version)
.allowingMajorVersion(version.getMajor(), targetMajorVersion.orElse(version.getMajor()))
.compatibleWithPlatform(version, controller().applications()::versionCompatibility)
.not().hasCompleted(change)
.onLowerVersionThan(version)
.canUpgradeAt(version, controller().clock().instant());
}
private void cancelUpgradesOf(InstanceList instances, String reason) {
instances = instances.unpinned();
if (instances.isEmpty()) return;
log.info("Cancelling upgrading of " + instances.asList() + " instances: " + reason);
for (ApplicationId instance : instances.asList())
controller().applications().deploymentTrigger().cancelChange(instance, PLATFORM);
}
/** Returns the number of applications to upgrade in this run */
private int numberOfApplicationsToUpgrade() {
return numberOfApplicationsToUpgrade(interval().dividedBy(Math.max(1, controller().curator().cluster().size())).toMillis(),
controller().clock().millis(),
upgradesPerMinute());
}
/** Returns the number of applications to upgrade in the interval containing now */
static int numberOfApplicationsToUpgrade(long intervalMillis, long nowMillis, double upgradesPerMinute) {
long intervalStart = Math.round(nowMillis / (double) intervalMillis) * intervalMillis;
double upgradesPerMilli = upgradesPerMinute / 60_000;
long upgradesAtStart = (long) (intervalStart * upgradesPerMilli);
long upgradesAtEnd = (long) ((intervalStart + intervalMillis) * upgradesPerMilli);
return (int) (upgradesAtEnd - upgradesAtStart);
}
/** Returns number of upgrades per minute */
public double upgradesPerMinute() {
return curator.readUpgradesPerMinute();
}
/** Sets the number of upgrades per minute */
public void setUpgradesPerMinute(double n) {
if (n < 0)
throw new IllegalArgumentException("Upgrades per minute must be >= 0, got " + n);
curator.writeUpgradesPerMinute(n);
}
/** Returns the target major version for applications not specifying one */
public OptionalInt targetMajorVersion() {
return controller().applications().targetMajorVersion();
}
/** Sets the default target major version. Set to empty to determine target version normally (by confidence) */
public void setTargetMajorVersion(Optional<Integer> targetMajorVersion) {
controller().applications().setTargetMajorVersion(targetMajorVersion);
}
/** Override confidence for given version. This will cause the computed confidence to be ignored */
public void overrideConfidence(Version version, Confidence confidence) {
if (confidence == Confidence.aborted && !version.isAfter(controller().readSystemVersion())) {
throw new IllegalArgumentException("Cannot override confidence to " + confidence +
" for version " + version.toFullString() +
": Version may be in use by applications");
}
try (Mutex lock = curator.lockConfidenceOverrides()) {
Map<Version, Confidence> overrides = new LinkedHashMap<>(curator.readConfidenceOverrides());
overrides.put(version, confidence);
curator.writeConfidenceOverrides(overrides);
}
}
/** Returns all confidence overrides */
public Map<Version, Confidence> confidenceOverrides() {
return curator.readConfidenceOverrides();
}
/** Remove confidence override for given version */
public void removeConfidenceOverride(Version version) {
controller().removeConfidenceOverride(version::equals);
}
} |
Yes ... | private void updateTargets(VersionStatus versionStatus, DeploymentStatusList deploymentStatuses, UpgradePolicy policy, OptionalInt targetMajorVersion) {
InstanceList instances = instances(deploymentStatuses);
InstanceList remaining = instances.with(policy);
Instant failureThreshold = controller().clock().instant().minus(Duration.ofDays(5));
Set<ApplicationId> failingRevision = InstanceList.from(deploymentStatuses.failingApplicationChangeSince(failureThreshold)).asSet();
List<Version> targetAndNewer = new ArrayList<>();
UnaryOperator<InstanceList> cancellationCriterion = policy == UpgradePolicy.canary ? i -> i.not().upgradingTo(targetAndNewer)
: i -> i.failing()
.not().upgradingTo(targetAndNewer);
Map<ApplicationId, Version> targets = new LinkedHashMap<>();
for (Version version : controller().applications().deploymentTrigger().targetsForPolicy(versionStatus, policy)) {
targetAndNewer.add(version);
InstanceList eligible = eligibleForVersion(remaining, version, targetMajorVersion);
InstanceList outdated = cancellationCriterion.apply(eligible);
cancelUpgradesOf(outdated.upgrading(), "Upgrading to outdated versions");
remaining = remaining.not().matching(eligible.asList()::contains)
.not().hasCompleted(Change.of(version));
for (ApplicationId id : outdated.and(eligible.not().upgrading()).not().changingRevision())
targets.put(id, version);
}
int numberToUpgrade = policy == UpgradePolicy.canary ? instances.size() : numberOfApplicationsToUpgrade();
for (ApplicationId id : instances.matching(targets.keySet()::contains).first(numberToUpgrade)) {
log.log(Level.INFO, "Triggering upgrade to " + targets.get(id) + " for " + id);
if (failingRevision.contains(id))
controller().applications().deploymentTrigger().cancelChange(id, ChangesToCancel.APPLICATION);
controller().applications().deploymentTrigger().triggerChange(id, Change.of(targets.get(id)));
}
} | Instant failureThreshold = controller().clock().instant().minus(Duration.ofDays(5)); | private void updateTargets(VersionStatus versionStatus, DeploymentStatusList deploymentStatuses, UpgradePolicy policy, OptionalInt targetMajorVersion) {
InstanceList instances = instances(deploymentStatuses);
InstanceList remaining = instances.with(policy);
Instant failureThreshold = controller().clock().instant().minus(DeploymentTrigger.maxFailingRevisionTime);
Set<ApplicationId> failingRevision = InstanceList.from(deploymentStatuses.failingApplicationChangeSince(failureThreshold)).asSet();
List<Version> targetAndNewer = new ArrayList<>();
UnaryOperator<InstanceList> cancellationCriterion = policy == UpgradePolicy.canary ? i -> i.not().upgradingTo(targetAndNewer)
: i -> i.failing()
.not().upgradingTo(targetAndNewer);
Map<ApplicationId, Version> targets = new LinkedHashMap<>();
for (Version version : controller().applications().deploymentTrigger().targetsForPolicy(versionStatus, policy)) {
targetAndNewer.add(version);
InstanceList eligible = eligibleForVersion(remaining, version, targetMajorVersion);
InstanceList outdated = cancellationCriterion.apply(eligible);
cancelUpgradesOf(outdated.upgrading(), "Upgrading to outdated versions");
remaining = remaining.not().matching(eligible.asList()::contains)
.not().hasCompleted(Change.of(version));
for (ApplicationId id : outdated.and(eligible.not().upgrading()))
targets.put(id, version);
}
int numberToUpgrade = policy == UpgradePolicy.canary ? instances.size() : numberOfApplicationsToUpgrade();
for (ApplicationId id : instances.matching(targets.keySet()::contains).first(numberToUpgrade)) {
log.log(Level.INFO, "Triggering upgrade to " + targets.get(id) + " for " + id);
if (failingRevision.contains(id))
controller().applications().deploymentTrigger().cancelChange(id, ChangesToCancel.APPLICATION);
controller().applications().deploymentTrigger().triggerChange(id, Change.of(targets.get(id)));
}
} | class Upgrader extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(Upgrader.class.getName());
private final CuratorDb curator;
private final Random random;
public Upgrader(Controller controller, Duration interval) {
super(controller, interval);
this.curator = controller.curator();
this.random = new Random(controller.clock().instant().toEpochMilli());
}
/**
* Schedule application upgrades. Note that this implementation must be idempotent.
*/
@Override
public double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
cancelBrokenUpgrades(versionStatus);
OptionalInt targetMajorVersion = targetMajorVersion();
DeploymentStatusList deploymentStatuses = deploymentStatuses(versionStatus);
for (UpgradePolicy policy : UpgradePolicy.values())
updateTargets(versionStatus, deploymentStatuses, policy, targetMajorVersion);
return 1.0;
}
private DeploymentStatusList deploymentStatuses(VersionStatus versionStatus) {
return controller().jobController().deploymentStatuses(ApplicationList.from(controller().applications().readable())
.withProjectId(),
versionStatus);
}
/** Returns a list of all production application instances, except those which are pinned, which we should not manipulate here. */
private InstanceList instances(DeploymentStatusList deploymentStatuses) {
return InstanceList.from(deploymentStatuses)
.withDeclaredJobs()
.shuffle(random)
.byIncreasingDeployedVersion()
.unpinned();
}
private void cancelBrokenUpgrades(VersionStatus versionStatus) {
InstanceList instances = instances(deploymentStatuses(controller().readVersionStatus()));
for (VespaVersion version : versionStatus.versions()) {
if (version.confidence() == Confidence.broken)
cancelUpgradesOf(instances.upgradingTo(version.versionNumber()).not().with(UpgradePolicy.canary),
version.versionNumber() + " is broken");
}
}
private InstanceList eligibleForVersion(InstanceList instances, Version version,
OptionalInt targetMajorVersion) {
Change change = Change.of(version);
return instances.not().failingOn(version)
.allowingMajorVersion(version.getMajor(), targetMajorVersion.orElse(version.getMajor()))
.compatibleWithPlatform(version, controller().applications()::versionCompatibility)
.not().hasCompleted(change)
.onLowerVersionThan(version)
.canUpgradeAt(version, controller().clock().instant());
}
private void cancelUpgradesOf(InstanceList instances, String reason) {
instances = instances.unpinned();
if (instances.isEmpty()) return;
log.info("Cancelling upgrading of " + instances.asList() + " instances: " + reason);
for (ApplicationId instance : instances.asList())
controller().applications().deploymentTrigger().cancelChange(instance, PLATFORM);
}
/** Returns the number of applications to upgrade in this run */
private int numberOfApplicationsToUpgrade() {
return numberOfApplicationsToUpgrade(interval().dividedBy(Math.max(1, controller().curator().cluster().size())).toMillis(),
controller().clock().millis(),
upgradesPerMinute());
}
/** Returns the number of applications to upgrade in the interval containing now */
static int numberOfApplicationsToUpgrade(long intervalMillis, long nowMillis, double upgradesPerMinute) {
long intervalStart = Math.round(nowMillis / (double) intervalMillis) * intervalMillis;
double upgradesPerMilli = upgradesPerMinute / 60_000;
long upgradesAtStart = (long) (intervalStart * upgradesPerMilli);
long upgradesAtEnd = (long) ((intervalStart + intervalMillis) * upgradesPerMilli);
return (int) (upgradesAtEnd - upgradesAtStart);
}
/** Returns number of upgrades per minute */
public double upgradesPerMinute() {
return curator.readUpgradesPerMinute();
}
/** Sets the number of upgrades per minute */
public void setUpgradesPerMinute(double n) {
if (n < 0)
throw new IllegalArgumentException("Upgrades per minute must be >= 0, got " + n);
curator.writeUpgradesPerMinute(n);
}
/** Returns the target major version for applications not specifying one */
public OptionalInt targetMajorVersion() {
return controller().applications().targetMajorVersion();
}
/** Sets the default target major version. Set to empty to determine target version normally (by confidence) */
public void setTargetMajorVersion(Optional<Integer> targetMajorVersion) {
controller().applications().setTargetMajorVersion(targetMajorVersion);
}
/** Override confidence for given version. This will cause the computed confidence to be ignored */
public void overrideConfidence(Version version, Confidence confidence) {
if (confidence == Confidence.aborted && !version.isAfter(controller().readSystemVersion())) {
throw new IllegalArgumentException("Cannot override confidence to " + confidence +
" for version " + version.toFullString() +
": Version may be in use by applications");
}
try (Mutex lock = curator.lockConfidenceOverrides()) {
Map<Version, Confidence> overrides = new LinkedHashMap<>(curator.readConfidenceOverrides());
overrides.put(version, confidence);
curator.writeConfidenceOverrides(overrides);
}
}
/** Returns all confidence overrides */
public Map<Version, Confidence> confidenceOverrides() {
return curator.readConfidenceOverrides();
}
/** Remove confidence override for given version */
public void removeConfidenceOverride(Version version) {
controller().removeConfidenceOverride(version::equals);
}
} | class Upgrader extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(Upgrader.class.getName());
private final CuratorDb curator;
private final Random random;
public Upgrader(Controller controller, Duration interval) {
super(controller, interval);
this.curator = controller.curator();
this.random = new Random(controller.clock().instant().toEpochMilli());
}
/**
* Schedule application upgrades. Note that this implementation must be idempotent.
*/
@Override
public double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
cancelBrokenUpgrades(versionStatus);
OptionalInt targetMajorVersion = targetMajorVersion();
DeploymentStatusList deploymentStatuses = deploymentStatuses(versionStatus);
for (UpgradePolicy policy : UpgradePolicy.values())
updateTargets(versionStatus, deploymentStatuses, policy, targetMajorVersion);
return 1.0;
}
private DeploymentStatusList deploymentStatuses(VersionStatus versionStatus) {
return controller().jobController().deploymentStatuses(ApplicationList.from(controller().applications().readable())
.withProjectId(),
versionStatus);
}
/** Returns a list of all production application instances, except those which are pinned, which we should not manipulate here. */
private InstanceList instances(DeploymentStatusList deploymentStatuses) {
return InstanceList.from(deploymentStatuses)
.withDeclaredJobs()
.shuffle(random)
.byIncreasingDeployedVersion()
.unpinned();
}
private void cancelBrokenUpgrades(VersionStatus versionStatus) {
InstanceList instances = instances(deploymentStatuses(controller().readVersionStatus()));
for (VespaVersion version : versionStatus.versions()) {
if (version.confidence() == Confidence.broken)
cancelUpgradesOf(instances.upgradingTo(version.versionNumber()).not().with(UpgradePolicy.canary),
version.versionNumber() + " is broken");
}
}
private InstanceList eligibleForVersion(InstanceList instances, Version version,
OptionalInt targetMajorVersion) {
Change change = Change.of(version);
return instances.not().failingOn(version)
.allowingMajorVersion(version.getMajor(), targetMajorVersion.orElse(version.getMajor()))
.compatibleWithPlatform(version, controller().applications()::versionCompatibility)
.not().hasCompleted(change)
.onLowerVersionThan(version)
.canUpgradeAt(version, controller().clock().instant());
}
private void cancelUpgradesOf(InstanceList instances, String reason) {
instances = instances.unpinned();
if (instances.isEmpty()) return;
log.info("Cancelling upgrading of " + instances.asList() + " instances: " + reason);
for (ApplicationId instance : instances.asList())
controller().applications().deploymentTrigger().cancelChange(instance, PLATFORM);
}
/** Returns the number of applications to upgrade in this run */
private int numberOfApplicationsToUpgrade() {
return numberOfApplicationsToUpgrade(interval().dividedBy(Math.max(1, controller().curator().cluster().size())).toMillis(),
controller().clock().millis(),
upgradesPerMinute());
}
/** Returns the number of applications to upgrade in the interval containing now */
static int numberOfApplicationsToUpgrade(long intervalMillis, long nowMillis, double upgradesPerMinute) {
long intervalStart = Math.round(nowMillis / (double) intervalMillis) * intervalMillis;
double upgradesPerMilli = upgradesPerMinute / 60_000;
long upgradesAtStart = (long) (intervalStart * upgradesPerMilli);
long upgradesAtEnd = (long) ((intervalStart + intervalMillis) * upgradesPerMilli);
return (int) (upgradesAtEnd - upgradesAtStart);
}
/** Returns number of upgrades per minute */
public double upgradesPerMinute() {
return curator.readUpgradesPerMinute();
}
/** Sets the number of upgrades per minute */
public void setUpgradesPerMinute(double n) {
if (n < 0)
throw new IllegalArgumentException("Upgrades per minute must be >= 0, got " + n);
curator.writeUpgradesPerMinute(n);
}
/** Returns the target major version for applications not specifying one */
public OptionalInt targetMajorVersion() {
return controller().applications().targetMajorVersion();
}
/** Sets the default target major version. Set to empty to determine target version normally (by confidence) */
public void setTargetMajorVersion(Optional<Integer> targetMajorVersion) {
controller().applications().setTargetMajorVersion(targetMajorVersion);
}
/** Override confidence for given version. This will cause the computed confidence to be ignored */
public void overrideConfidence(Version version, Confidence confidence) {
if (confidence == Confidence.aborted && !version.isAfter(controller().readSystemVersion())) {
throw new IllegalArgumentException("Cannot override confidence to " + confidence +
" for version " + version.toFullString() +
": Version may be in use by applications");
}
try (Mutex lock = curator.lockConfidenceOverrides()) {
Map<Version, Confidence> overrides = new LinkedHashMap<>(curator.readConfidenceOverrides());
overrides.put(version, confidence);
curator.writeConfidenceOverrides(overrides);
}
}
/** Returns all confidence overrides */
public Map<Version, Confidence> confidenceOverrides() {
return curator.readConfidenceOverrides();
}
/** Remove confidence override for given version */
public void removeConfidenceOverride(Version version) {
controller().removeConfidenceOverride(version::equals);
}
} |
Let us hope this is never called.... | public int size() {
return keySet().size();
} | return keySet().size(); | public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} |
A bit faster | public int size() {
return keySet().size();
} | return keySet().size(); | public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} |
This is used to pre-check the mv's plan. Now only SPJ or SPJG are supported. | public static boolean isValidMVPlan(OptExpression root) {
if (root == null) {
return false;
}
return isLogicalSPJ(root) || isLogicalSPJG(root);
} | return isLogicalSPJ(root) || isLogicalSPJG(root); | public static boolean isValidMVPlan(OptExpression root) {
if (root == null) {
return false;
}
return isLogicalSPJ(root) || isLogicalSPJG(root);
} | class Utils {
private static final Logger LOG = LogManager.getLogger(Utils.class);
public static List<ScalarOperator> extractConjuncts(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractConjunctsImpl(root, list);
return list;
}
public static void extractConjunctsImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isAnd()) {
result.add(root);
return;
}
extractConjunctsImpl(cpo.getChild(0), result);
extractConjunctsImpl(cpo.getChild(1), result);
}
public static List<ScalarOperator> extractDisjunctive(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractDisjunctiveImpl(root, list);
return list;
}
public static void extractDisjunctiveImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isOr()) {
result.add(root);
return;
}
extractDisjunctiveImpl(cpo.getChild(0), result);
extractDisjunctiveImpl(cpo.getChild(1), result);
}
public static List<ColumnRefOperator> extractColumnRef(ScalarOperator root) {
if (null == root || !root.isVariable()) {
return new LinkedList<>();
}
LinkedList<ColumnRefOperator> list = new LinkedList<>();
if (OperatorType.VARIABLE.equals(root.getOpType())) {
list.add((ColumnRefOperator) root);
return list;
}
for (ScalarOperator child : root.getChildren()) {
list.addAll(extractColumnRef(child));
}
return list;
}
public static int countColumnRef(ScalarOperator root) {
return countColumnRef(root, 0);
}
private static int countColumnRef(ScalarOperator root, int count) {
if (null == root || !root.isVariable()) {
return 0;
}
if (OperatorType.VARIABLE.equals(root.getOpType())) {
return 1;
}
for (ScalarOperator child : root.getChildren()) {
count += countColumnRef(child, count);
}
return count;
}
public static void extractOlapScanOperator(GroupExpression groupExpression, List<LogicalOlapScanOperator> list) {
extractOperator(groupExpression, list, p -> OperatorType.LOGICAL_OLAP_SCAN.equals(p.getOpType()));
}
private static <E extends Operator> void extractOperator(GroupExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<Group> groups = root.getInputs();
for (Group group : groups) {
GroupExpression expression = group.getFirstLogicalExpression();
extractOperator(expression, list, lambda);
}
}
public static boolean containAnyColumnRefs(List<ColumnRefOperator> refs, ScalarOperator operator) {
if (refs.isEmpty() || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return refs.contains(operator);
}
for (ScalarOperator so : operator.getChildren()) {
if (containAnyColumnRefs(refs, so)) {
return true;
}
}
return false;
}
public static boolean containColumnRef(ScalarOperator operator, String column) {
if (null == column || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return ((ColumnRefOperator) operator).getName().equalsIgnoreCase(column);
}
for (ScalarOperator so : operator.getChildren()) {
if (containColumnRef(so, column)) {
return true;
}
}
return false;
}
public static ScalarOperator compoundOr(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, nodes);
}
public static ScalarOperator compoundOr(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, Arrays.asList(nodes));
}
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, nodes);
}
public static ScalarOperator compoundAnd(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, Arrays.asList(nodes));
}
public static ScalarOperator createCompound(CompoundPredicateOperator.CompoundType type,
Collection<ScalarOperator> nodes) {
LinkedList<ScalarOperator> link =
nodes.stream().filter(Objects::nonNull).collect(Collectors.toCollection(Lists::newLinkedList));
if (link.size() < 1) {
return null;
}
if (link.size() == 1) {
return link.get(0);
}
while (link.size() > 1) {
LinkedList<ScalarOperator> buffer = new LinkedList<>();
while (link.size() >= 2) {
buffer.add(new CompoundPredicateOperator(type, link.poll(), link.poll()));
}
if (!link.isEmpty()) {
buffer.add(link.remove());
}
link = buffer;
}
return link.remove();
}
public static boolean isInnerOrCrossJoin(Operator operator) {
if (operator instanceof LogicalJoinOperator) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
return joinOperator.isInnerOrCrossJoin();
}
return false;
}
public static int countInnerJoinNodeSize(OptExpression root) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (isInnerOrCrossJoin(operator) && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += countInnerJoinNodeSize(child);
} else {
count = Math.max(count, countInnerJoinNodeSize(child));
}
}
if (isInnerOrCrossJoin(operator) && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += 1;
}
return count;
}
public static boolean capableSemiReorder(OptExpression root, boolean hasSemi, int joinNum, int maxJoin) {
Operator operator = root.getOp();
if (operator instanceof LogicalJoinOperator) {
if (((LogicalJoinOperator) operator).getJoinType().isSemiAntiJoin()) {
hasSemi = true;
} else {
joinNum = joinNum + 1;
}
if (joinNum > maxJoin && hasSemi) {
return false;
}
}
for (OptExpression child : root.getInputs()) {
if (operator instanceof LogicalJoinOperator) {
if (!capableSemiReorder(child, hasSemi, joinNum, maxJoin)) {
return false;
}
} else {
if (!capableSemiReorder(child, false, 0, maxJoin)) {
return false;
}
}
}
return true;
}
public static boolean hasUnknownColumnsStats(OptExpression root) {
Operator operator = root.getOp();
if (operator instanceof LogicalScanOperator) {
LogicalScanOperator scanOperator = (LogicalScanOperator) operator;
List<String> colNames =
scanOperator.getColRefToColumnMetaMap().values().stream().map(Column::getName).collect(
Collectors.toList());
if (operator instanceof LogicalOlapScanOperator) {
Table table = scanOperator.getTable();
if (table instanceof OlapTable) {
if (KeysType.AGG_KEYS.equals(((OlapTable) table).getKeysType())) {
List<String> keyColumnNames =
scanOperator.getColRefToColumnMetaMap().values().stream().filter(Column::isKey)
.map(Column::getName)
.collect(Collectors.toList());
List<ColumnStatistic> keyColumnStatisticList =
GlobalStateMgr.getCurrentStatisticStorage().getColumnStatistics(table, keyColumnNames);
return keyColumnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
}
}
List<ColumnStatistic> columnStatisticList =
GlobalStateMgr.getCurrentStatisticStorage().getColumnStatistics(table, colNames);
return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
} else if (operator instanceof LogicalHiveScanOperator || operator instanceof LogicalHudiScanOperator) {
if (ConnectContext.get().getSessionVariable().enableHiveColumnStats()) {
if (operator instanceof LogicalHiveScanOperator) {
return ((LogicalHiveScanOperator) operator).hasUnknownColumn();
} else {
return ((LogicalHudiScanOperator) operator).hasUnknownColumn();
}
}
return true;
} else if (operator instanceof LogicalIcebergScanOperator) {
IcebergTable table = (IcebergTable) scanOperator.getTable();
try {
List<ColumnStatistic> columnStatisticList = IcebergTableStatisticCalculator.getColumnStatistics(
new ArrayList<>(), table.getIcebergTable(),
scanOperator.getColRefToColumnMetaMap());
return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
} catch (Exception e) {
LOG.warn("Iceberg table {} get column failed. error : {}", table.getName(), e);
return true;
}
} else {
return true;
}
}
return root.getInputs().stream().anyMatch(Utils::hasUnknownColumnsStats);
}
public static long getLongFromDateTime(LocalDateTime dateTime) {
return dateTime.atZone(ZoneId.systemDefault()).toInstant().getEpochSecond();
}
public static LocalDateTime getDatetimeFromLong(long dateTime) {
return LocalDateTime.ofInstant(Instant.ofEpochSecond(dateTime), ZoneId.systemDefault());
}
public static long convertBitSetToLong(BitSet bitSet, int length) {
long gid = 0;
for (int b = 0; b < length; ++b) {
gid = gid * 2 + (bitSet.get(b) ? 1 : 0);
}
return gid;
}
public static ColumnRefOperator findSmallestColumnRef(List<ColumnRefOperator> columnRefOperatorList) {
Preconditions.checkState(!columnRefOperatorList.isEmpty());
ColumnRefOperator smallestColumnRef = columnRefOperatorList.get(0);
int smallestColumnLength = Integer.MAX_VALUE;
for (ColumnRefOperator columnRefOperator : columnRefOperatorList) {
Type columnType = columnRefOperator.getType();
if (columnType.isScalarType()) {
int columnLength = columnType.getTypeSize();
if (columnLength < smallestColumnLength) {
smallestColumnRef = columnRefOperator;
smallestColumnLength = columnLength;
}
}
}
return smallestColumnRef;
}
public static boolean canDoReplicatedJoin(OlapTable table, long selectedIndexId,
Collection<Long> selectedPartitionId,
Collection<Long> selectedTabletId) {
ConnectContext ctx = ConnectContext.get();
int backendSize = ctx.getTotalBackendNumber();
int aliveBackendSize = ctx.getAliveBackendNumber();
int schemaHash = table.getSchemaHashByIndexId(selectedIndexId);
for (Long partitionId : selectedPartitionId) {
Partition partition = table.getPartition(partitionId);
if (table.isLakeTable()) {
return false;
}
if (table.getPartitionInfo().getReplicationNum(partitionId) < backendSize) {
return false;
}
long visibleVersion = partition.getVisibleVersion();
MaterializedIndex materializedIndex = partition.getIndex(selectedIndexId);
for (Long id : selectedTabletId) {
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(id);
if (tablet != null && tablet.getQueryableReplicasSize(visibleVersion, schemaHash)
!= aliveBackendSize) {
return false;
}
}
}
return true;
}
public static boolean isEqualBinaryPredicate(ScalarOperator predicate) {
if (predicate instanceof BinaryPredicateOperator) {
BinaryPredicateOperator binaryPredicate = (BinaryPredicateOperator) predicate;
return binaryPredicate.getBinaryType().isEquivalence();
}
if (predicate instanceof CompoundPredicateOperator) {
CompoundPredicateOperator compoundPredicate = (CompoundPredicateOperator) predicate;
if (compoundPredicate.isAnd()) {
return isEqualBinaryPredicate(compoundPredicate.getChild(0)) &&
isEqualBinaryPredicate(compoundPredicate.getChild(1));
}
return false;
}
return false;
}
/**
* Try cast op to descType, return empty if failed
*/
public static Optional<ScalarOperator> tryCastConstant(ScalarOperator op, Type descType) {
if (!op.isConstantRef() || op.getType().matchesType(descType) || Type.FLOAT.equals(op.getType())
|| descType.equals(Type.FLOAT)) {
return Optional.empty();
}
try {
if (((ConstantOperator) op).isNull()) {
return Optional.of(ConstantOperator.createNull(descType));
}
ConstantOperator result = ((ConstantOperator) op).castToStrictly(descType);
if (result.toString().equalsIgnoreCase(op.toString())) {
return Optional.of(result);
} else if (descType.isDate() && (op.getType().isIntegerType() || op.getType().isStringType())) {
if (op.toString().equalsIgnoreCase(result.toString().replaceAll("-", ""))) {
return Optional.of(result);
}
}
} catch (Exception ignored) {
}
return Optional.empty();
}
public static Optional<ScalarOperator> tryDecimalCastConstant(CastOperator lhs, ConstantOperator rhs) {
Type lhsType = lhs.getType();
Type rhsType = rhs.getType();
Type childType = lhs.getChild(0).getType();
if (!lhsType.isExactNumericType() ||
!rhsType.isExactNumericType() ||
!childType.isExactNumericType()) {
return Optional.empty();
}
if (!Type.isAssignable2Decimal((ScalarType) lhsType, (ScalarType) childType) ||
!Type.isAssignable2Decimal((ScalarType) childType, (ScalarType) rhsType)) {
return Optional.empty();
}
if (rhs.isNull()) {
return Optional.of(ConstantOperator.createNull(childType));
}
try {
ConstantOperator result = rhs.castTo(childType);
return Optional.of(result);
} catch (Exception ignored) {
}
return Optional.empty();
}
public static ScalarOperator transTrue2Null(ScalarOperator predicates) {
if (ConstantOperator.TRUE.equals(predicates)) {
return null;
}
return predicates;
}
public static <T extends ScalarOperator> List<T> collect(ScalarOperator root, Class<T> clazz) {
List<T> output = Lists.newArrayList();
collect(root, clazz, output);
return output;
}
private static <T extends ScalarOperator> void collect(ScalarOperator root, Class<T> clazz, List<T> output) {
if (clazz.isInstance(root)) {
output.add(clazz.cast(root));
}
root.getChildren().forEach(child -> collect(child, clazz, output));
}
public static Set<MaterializedView> getRelatedMvs(int maxLevel, List<Table> tablesToCheck) {
Set<MaterializedView> mvs = Sets.newHashSet();
getRelatedMvs(maxLevel, 0, tablesToCheck, mvs);
return mvs;
}
public static void getRelatedMvs(int maxLevel, int currentLevel, List<Table> tablesToCheck, Set<MaterializedView> mvs) {
if (currentLevel >= maxLevel) {
return;
}
Set<MvId> newMvIds = Sets.newHashSet();
for (Table table : tablesToCheck) {
Set<MvId> mvIds = table.getRelatedMaterializedViews();
if (mvIds != null && !mvIds.isEmpty()) {
newMvIds.addAll(mvIds);
}
}
if (newMvIds.isEmpty()) {
return;
}
List<Table> newMvs = Lists.newArrayList();
for (MvId mvId : newMvIds) {
Database db = GlobalStateMgr.getCurrentState().getDb(mvId.getDbId());
if (db == null) {
continue;
}
Table table = db.getTable(mvId.getId());
if (table == null) {
continue;
}
newMvs.add(table);
mvs.add((MaterializedView) table);
}
getRelatedMvs(maxLevel, currentLevel + 1, newMvs, mvs);
}
public static List<Table> getAllTables(OptExpression root) {
List<Table> tables = Lists.newArrayList();
getAllTables(root, tables);
return tables;
}
private static void getAllTables(OptExpression root, List<Table> tables) {
if (root.getOp() instanceof LogicalScanOperator) {
LogicalScanOperator scanOperator = (LogicalScanOperator) root.getOp();
tables.add(scanOperator.getTable());
} else {
for (OptExpression child : root.getInputs()) {
getAllTables(child, tables);
}
}
}
public static boolean isLogicalSPJG(OptExpression root) {
if (root == null) {
return false;
}
Operator operator = root.getOp();
if (!(operator instanceof LogicalAggregationOperator)) {
return false;
}
LogicalAggregationOperator agg = (LogicalAggregationOperator) operator;
if (agg.getType() != AggType.GLOBAL) {
return false;
}
OptExpression child = root.inputAt(0);
return isLogicalSPJ(child);
}
public static boolean isLogicalSPJ(OptExpression root) {
if (root == null) {
return false;
}
Operator operator = root.getOp();
if (!(operator instanceof LogicalOperator)) {
return false;
}
if (!(operator instanceof LogicalScanOperator)
&& !(operator instanceof LogicalProjectOperator)
&& !(operator instanceof LogicalFilterOperator)
&& !(operator instanceof LogicalJoinOperator)) {
return false;
}
for (OptExpression child : root.getInputs()) {
if (!isLogicalSPJ(child)) {
return false;
}
}
return true;
}
public static Pair<OptExpression, LogicalPlan> getRuleOptimizedLogicalPlan(String sql,
ColumnRefFactory columnRefFactory,
ConnectContext connectContext) {
StatementBase mvStmt;
try {
List<StatementBase> statementBases =
com.starrocks.sql.parser.SqlParser.parse(sql, connectContext.getSessionVariable());
Preconditions.checkState(statementBases.size() == 1);
mvStmt = statementBases.get(0);
} catch (ParsingException parsingException) {
LOG.warn("parse sql:{} failed", sql, parsingException);
return null;
}
Preconditions.checkState(mvStmt instanceof QueryStatement);
Analyzer.analyze(mvStmt, connectContext);
QueryRelation query = ((QueryStatement) mvStmt).getQueryRelation();
LogicalPlan logicalPlan =
new RelationTransformer(columnRefFactory, connectContext).transformWithSelectLimit(query);
OptimizerConfig optimizerConfig = new OptimizerConfig(OptimizerConfig.OptimizerAlgorithm.RULE_BASED);
Optimizer optimizer = new Optimizer(optimizerConfig);
OptExpression optimizedPlan = optimizer.optimize(
connectContext,
logicalPlan.getRoot(),
new PhysicalPropertySet(),
new ColumnRefSet(logicalPlan.getOutputColumn()),
columnRefFactory);
return Pair.create(optimizedPlan, logicalPlan);
}
public static List<OptExpression> collectScanExprs(OptExpression expression) {
List<OptExpression> scanExprs = Lists.newArrayList();
OptExpressionVisitor scanCollector = new OptExpressionVisitor<Void, Void>() {
@Override
public Void visit(OptExpression optExpression, Void context) {
for (OptExpression input : optExpression.getInputs()) {
super.visit(input, context);
}
return null;
}
@Override
public Void visitLogicalTableScan(OptExpression optExpression, Void context) {
scanExprs.add(optExpression);
return null;
}
};
expression.getOp().accept(scanCollector, expression, null);
return scanExprs;
}
} | class Utils {
private static final Logger LOG = LogManager.getLogger(Utils.class);
public static List<ScalarOperator> extractConjuncts(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractConjunctsImpl(root, list);
return list;
}
public static void extractConjunctsImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isAnd()) {
result.add(root);
return;
}
extractConjunctsImpl(cpo.getChild(0), result);
extractConjunctsImpl(cpo.getChild(1), result);
}
public static List<ScalarOperator> extractDisjunctive(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractDisjunctiveImpl(root, list);
return list;
}
public static void extractDisjunctiveImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isOr()) {
result.add(root);
return;
}
extractDisjunctiveImpl(cpo.getChild(0), result);
extractDisjunctiveImpl(cpo.getChild(1), result);
}
public static List<ColumnRefOperator> extractColumnRef(ScalarOperator root) {
if (null == root || !root.isVariable()) {
return new LinkedList<>();
}
LinkedList<ColumnRefOperator> list = new LinkedList<>();
if (OperatorType.VARIABLE.equals(root.getOpType())) {
list.add((ColumnRefOperator) root);
return list;
}
for (ScalarOperator child : root.getChildren()) {
list.addAll(extractColumnRef(child));
}
return list;
}
public static int countColumnRef(ScalarOperator root) {
return countColumnRef(root, 0);
}
private static int countColumnRef(ScalarOperator root, int count) {
if (null == root || !root.isVariable()) {
return 0;
}
if (OperatorType.VARIABLE.equals(root.getOpType())) {
return 1;
}
for (ScalarOperator child : root.getChildren()) {
count += countColumnRef(child, count);
}
return count;
}
public static void extractOlapScanOperator(GroupExpression groupExpression, List<LogicalOlapScanOperator> list) {
extractOperator(groupExpression, list, p -> OperatorType.LOGICAL_OLAP_SCAN.equals(p.getOpType()));
}
private static <E extends Operator> void extractOperator(GroupExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<Group> groups = root.getInputs();
for (Group group : groups) {
GroupExpression expression = group.getFirstLogicalExpression();
extractOperator(expression, list, lambda);
}
}
public static boolean containAnyColumnRefs(List<ColumnRefOperator> refs, ScalarOperator operator) {
if (refs.isEmpty() || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return refs.contains(operator);
}
for (ScalarOperator so : operator.getChildren()) {
if (containAnyColumnRefs(refs, so)) {
return true;
}
}
return false;
}
public static boolean containColumnRef(ScalarOperator operator, String column) {
if (null == column || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return ((ColumnRefOperator) operator).getName().equalsIgnoreCase(column);
}
for (ScalarOperator so : operator.getChildren()) {
if (containColumnRef(so, column)) {
return true;
}
}
return false;
}
public static ScalarOperator compoundOr(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, nodes);
}
public static ScalarOperator compoundOr(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, Arrays.asList(nodes));
}
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, nodes);
}
public static ScalarOperator compoundAnd(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, Arrays.asList(nodes));
}
public static ScalarOperator createCompound(CompoundPredicateOperator.CompoundType type,
Collection<ScalarOperator> nodes) {
LinkedList<ScalarOperator> link =
nodes.stream().filter(Objects::nonNull).collect(Collectors.toCollection(Lists::newLinkedList));
if (link.size() < 1) {
return null;
}
if (link.size() == 1) {
return link.get(0);
}
while (link.size() > 1) {
LinkedList<ScalarOperator> buffer = new LinkedList<>();
while (link.size() >= 2) {
buffer.add(new CompoundPredicateOperator(type, link.poll(), link.poll()));
}
if (!link.isEmpty()) {
buffer.add(link.remove());
}
link = buffer;
}
return link.remove();
}
public static boolean isInnerOrCrossJoin(Operator operator) {
if (operator instanceof LogicalJoinOperator) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
return joinOperator.isInnerOrCrossJoin();
}
return false;
}
public static int countInnerJoinNodeSize(OptExpression root) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (isInnerOrCrossJoin(operator) && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += countInnerJoinNodeSize(child);
} else {
count = Math.max(count, countInnerJoinNodeSize(child));
}
}
if (isInnerOrCrossJoin(operator) && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += 1;
}
return count;
}
public static boolean capableSemiReorder(OptExpression root, boolean hasSemi, int joinNum, int maxJoin) {
Operator operator = root.getOp();
if (operator instanceof LogicalJoinOperator) {
if (((LogicalJoinOperator) operator).getJoinType().isSemiAntiJoin()) {
hasSemi = true;
} else {
joinNum = joinNum + 1;
}
if (joinNum > maxJoin && hasSemi) {
return false;
}
}
for (OptExpression child : root.getInputs()) {
if (operator instanceof LogicalJoinOperator) {
if (!capableSemiReorder(child, hasSemi, joinNum, maxJoin)) {
return false;
}
} else {
if (!capableSemiReorder(child, false, 0, maxJoin)) {
return false;
}
}
}
return true;
}
public static boolean hasUnknownColumnsStats(OptExpression root) {
Operator operator = root.getOp();
if (operator instanceof LogicalScanOperator) {
LogicalScanOperator scanOperator = (LogicalScanOperator) operator;
List<String> colNames =
scanOperator.getColRefToColumnMetaMap().values().stream().map(Column::getName).collect(
Collectors.toList());
if (operator instanceof LogicalOlapScanOperator) {
Table table = scanOperator.getTable();
if (table instanceof OlapTable) {
if (KeysType.AGG_KEYS.equals(((OlapTable) table).getKeysType())) {
List<String> keyColumnNames =
scanOperator.getColRefToColumnMetaMap().values().stream().filter(Column::isKey)
.map(Column::getName)
.collect(Collectors.toList());
List<ColumnStatistic> keyColumnStatisticList =
GlobalStateMgr.getCurrentStatisticStorage().getColumnStatistics(table, keyColumnNames);
return keyColumnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
}
}
List<ColumnStatistic> columnStatisticList =
GlobalStateMgr.getCurrentStatisticStorage().getColumnStatistics(table, colNames);
return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
} else if (operator instanceof LogicalHiveScanOperator || operator instanceof LogicalHudiScanOperator) {
if (ConnectContext.get().getSessionVariable().enableHiveColumnStats()) {
if (operator instanceof LogicalHiveScanOperator) {
return ((LogicalHiveScanOperator) operator).hasUnknownColumn();
} else {
return ((LogicalHudiScanOperator) operator).hasUnknownColumn();
}
}
return true;
} else if (operator instanceof LogicalIcebergScanOperator) {
IcebergTable table = (IcebergTable) scanOperator.getTable();
try {
List<ColumnStatistic> columnStatisticList = IcebergTableStatisticCalculator.getColumnStatistics(
new ArrayList<>(), table.getIcebergTable(),
scanOperator.getColRefToColumnMetaMap());
return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
} catch (Exception e) {
LOG.warn("Iceberg table {} get column failed. error : {}", table.getName(), e);
return true;
}
} else {
return true;
}
}
return root.getInputs().stream().anyMatch(Utils::hasUnknownColumnsStats);
}
public static long getLongFromDateTime(LocalDateTime dateTime) {
return dateTime.atZone(ZoneId.systemDefault()).toInstant().getEpochSecond();
}
public static LocalDateTime getDatetimeFromLong(long dateTime) {
return LocalDateTime.ofInstant(Instant.ofEpochSecond(dateTime), ZoneId.systemDefault());
}
public static long convertBitSetToLong(BitSet bitSet, int length) {
long gid = 0;
for (int b = 0; b < length; ++b) {
gid = gid * 2 + (bitSet.get(b) ? 1 : 0);
}
return gid;
}
public static ColumnRefOperator findSmallestColumnRef(List<ColumnRefOperator> columnRefOperatorList) {
Preconditions.checkState(!columnRefOperatorList.isEmpty());
ColumnRefOperator smallestColumnRef = columnRefOperatorList.get(0);
int smallestColumnLength = Integer.MAX_VALUE;
for (ColumnRefOperator columnRefOperator : columnRefOperatorList) {
Type columnType = columnRefOperator.getType();
if (columnType.isScalarType()) {
int columnLength = columnType.getTypeSize();
if (columnLength < smallestColumnLength) {
smallestColumnRef = columnRefOperator;
smallestColumnLength = columnLength;
}
}
}
return smallestColumnRef;
}
public static boolean canDoReplicatedJoin(OlapTable table, long selectedIndexId,
Collection<Long> selectedPartitionId,
Collection<Long> selectedTabletId) {
ConnectContext ctx = ConnectContext.get();
int backendSize = ctx.getTotalBackendNumber();
int aliveBackendSize = ctx.getAliveBackendNumber();
int schemaHash = table.getSchemaHashByIndexId(selectedIndexId);
for (Long partitionId : selectedPartitionId) {
Partition partition = table.getPartition(partitionId);
if (table.isLakeTable()) {
return false;
}
if (table.getPartitionInfo().getReplicationNum(partitionId) < backendSize) {
return false;
}
long visibleVersion = partition.getVisibleVersion();
MaterializedIndex materializedIndex = partition.getIndex(selectedIndexId);
for (Long id : selectedTabletId) {
LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(id);
if (tablet != null && tablet.getQueryableReplicasSize(visibleVersion, schemaHash)
!= aliveBackendSize) {
return false;
}
}
}
return true;
}
public static boolean isEqualBinaryPredicate(ScalarOperator predicate) {
if (predicate instanceof BinaryPredicateOperator) {
BinaryPredicateOperator binaryPredicate = (BinaryPredicateOperator) predicate;
return binaryPredicate.getBinaryType().isEquivalence();
}
if (predicate instanceof CompoundPredicateOperator) {
CompoundPredicateOperator compoundPredicate = (CompoundPredicateOperator) predicate;
if (compoundPredicate.isAnd()) {
return isEqualBinaryPredicate(compoundPredicate.getChild(0)) &&
isEqualBinaryPredicate(compoundPredicate.getChild(1));
}
return false;
}
return false;
}
/**
* Try cast op to descType, return empty if failed
*/
public static Optional<ScalarOperator> tryCastConstant(ScalarOperator op, Type descType) {
if (!op.isConstantRef() || op.getType().matchesType(descType) || Type.FLOAT.equals(op.getType())
|| descType.equals(Type.FLOAT)) {
return Optional.empty();
}
try {
if (((ConstantOperator) op).isNull()) {
return Optional.of(ConstantOperator.createNull(descType));
}
ConstantOperator result = ((ConstantOperator) op).castToStrictly(descType);
if (result.toString().equalsIgnoreCase(op.toString())) {
return Optional.of(result);
} else if (descType.isDate() && (op.getType().isIntegerType() || op.getType().isStringType())) {
if (op.toString().equalsIgnoreCase(result.toString().replaceAll("-", ""))) {
return Optional.of(result);
}
}
} catch (Exception ignored) {
}
return Optional.empty();
}
public static Optional<ScalarOperator> tryDecimalCastConstant(CastOperator lhs, ConstantOperator rhs) {
Type lhsType = lhs.getType();
Type rhsType = rhs.getType();
Type childType = lhs.getChild(0).getType();
if (!lhsType.isExactNumericType() ||
!rhsType.isExactNumericType() ||
!childType.isExactNumericType()) {
return Optional.empty();
}
if (!Type.isAssignable2Decimal((ScalarType) lhsType, (ScalarType) childType) ||
!Type.isAssignable2Decimal((ScalarType) childType, (ScalarType) rhsType)) {
return Optional.empty();
}
if (rhs.isNull()) {
return Optional.of(ConstantOperator.createNull(childType));
}
try {
ConstantOperator result = rhs.castTo(childType);
return Optional.of(result);
} catch (Exception ignored) {
}
return Optional.empty();
}
public static ScalarOperator transTrue2Null(ScalarOperator predicates) {
if (ConstantOperator.TRUE.equals(predicates)) {
return null;
}
return predicates;
}
public static <T extends ScalarOperator> List<T> collect(ScalarOperator root, Class<T> clazz) {
List<T> output = Lists.newArrayList();
collect(root, clazz, output);
return output;
}
private static <T extends ScalarOperator> void collect(ScalarOperator root, Class<T> clazz, List<T> output) {
if (clazz.isInstance(root)) {
output.add(clazz.cast(root));
}
root.getChildren().forEach(child -> collect(child, clazz, output));
}
public static Set<MaterializedView> getRelatedMvs(int maxLevel, List<Table> tablesToCheck) {
Set<MaterializedView> mvs = Sets.newHashSet();
getRelatedMvs(maxLevel, 0, tablesToCheck, mvs);
return mvs;
}
public static void getRelatedMvs(int maxLevel, int currentLevel, List<Table> tablesToCheck, Set<MaterializedView> mvs) {
if (currentLevel >= maxLevel) {
return;
}
Set<MvId> newMvIds = Sets.newHashSet();
for (Table table : tablesToCheck) {
Set<MvId> mvIds = table.getRelatedMaterializedViews();
if (mvIds != null && !mvIds.isEmpty()) {
newMvIds.addAll(mvIds);
}
}
if (newMvIds.isEmpty()) {
return;
}
List<Table> newMvs = Lists.newArrayList();
for (MvId mvId : newMvIds) {
Database db = GlobalStateMgr.getCurrentState().getDb(mvId.getDbId());
if (db == null) {
continue;
}
Table table = db.getTable(mvId.getId());
if (table == null) {
continue;
}
newMvs.add(table);
mvs.add((MaterializedView) table);
}
getRelatedMvs(maxLevel, currentLevel + 1, newMvs, mvs);
}
public static List<Table> getAllTables(OptExpression root) {
List<Table> tables = Lists.newArrayList();
getAllTables(root, tables);
return tables;
}
private static void getAllTables(OptExpression root, List<Table> tables) {
if (root.getOp() instanceof LogicalScanOperator) {
LogicalScanOperator scanOperator = (LogicalScanOperator) root.getOp();
tables.add(scanOperator.getTable());
} else {
for (OptExpression child : root.getInputs()) {
getAllTables(child, tables);
}
}
}
public static boolean isLogicalSPJG(OptExpression root) {
if (root == null) {
return false;
}
Operator operator = root.getOp();
if (!(operator instanceof LogicalAggregationOperator)) {
return false;
}
LogicalAggregationOperator agg = (LogicalAggregationOperator) operator;
if (agg.getType() != AggType.GLOBAL) {
return false;
}
OptExpression child = root.inputAt(0);
return isLogicalSPJ(child);
}
public static boolean isLogicalSPJ(OptExpression root) {
if (root == null) {
return false;
}
Operator operator = root.getOp();
if (!(operator instanceof LogicalOperator)) {
return false;
}
if (!(operator instanceof LogicalScanOperator)
&& !(operator instanceof LogicalProjectOperator)
&& !(operator instanceof LogicalFilterOperator)
&& !(operator instanceof LogicalJoinOperator)) {
return false;
}
for (OptExpression child : root.getInputs()) {
if (!isLogicalSPJ(child)) {
return false;
}
}
return true;
}
public static Pair<OptExpression, LogicalPlan> getRuleOptimizedLogicalPlan(String sql,
ColumnRefFactory columnRefFactory,
ConnectContext connectContext) {
StatementBase mvStmt;
try {
List<StatementBase> statementBases =
com.starrocks.sql.parser.SqlParser.parse(sql, connectContext.getSessionVariable());
Preconditions.checkState(statementBases.size() == 1);
mvStmt = statementBases.get(0);
} catch (ParsingException parsingException) {
LOG.warn("parse sql:{} failed", sql, parsingException);
return null;
}
Preconditions.checkState(mvStmt instanceof QueryStatement);
Analyzer.analyze(mvStmt, connectContext);
QueryRelation query = ((QueryStatement) mvStmt).getQueryRelation();
LogicalPlan logicalPlan =
new RelationTransformer(columnRefFactory, connectContext).transformWithSelectLimit(query);
OptimizerConfig optimizerConfig = new OptimizerConfig(OptimizerConfig.OptimizerAlgorithm.RULE_BASED);
Optimizer optimizer = new Optimizer(optimizerConfig);
OptExpression optimizedPlan = optimizer.optimize(
connectContext,
logicalPlan.getRoot(),
new PhysicalPropertySet(),
new ColumnRefSet(logicalPlan.getOutputColumn()),
columnRefFactory);
return Pair.create(optimizedPlan, logicalPlan);
}
public static List<OptExpression> collectScanExprs(OptExpression expression) {
List<OptExpression> scanExprs = Lists.newArrayList();
OptExpressionVisitor scanCollector = new OptExpressionVisitor<Void, Void>() {
@Override
public Void visit(OptExpression optExpression, Void context) {
for (OptExpression input : optExpression.getInputs()) {
super.visit(input, context);
}
return null;
}
@Override
public Void visitLogicalTableScan(OptExpression optExpression, Void context) {
scanExprs.add(optExpression);
return null;
}
};
expression.getOp().accept(scanCollector, expression, null);
return scanExprs;
}
} |
I wonder if you should also implement the other readonly operations, like values, entrySet, equals, hashCode too ? | public Collection<V> values() {
throw new UnsupportedOperationException();
} | throw new UnsupportedOperationException(); | public Collection<V> values() {
throw new UnsupportedOperationException();
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
}
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
}
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} |
Yes, it's not. | public int size() {
return keySet().size();
} | return keySet().size(); | public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
public Collection<V> values() {
throw new UnsupportedOperationException();
}
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} |
They aren't used, so I don't think so. | public Collection<V> values() {
throw new UnsupportedOperationException();
} | throw new UnsupportedOperationException(); | public Collection<V> values() {
throw new UnsupportedOperationException();
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
}
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} | class ChainedMap<K, V> implements Map<K, V> {
private final Map<K, V> primary, secondary;
ChainedMap(Map<K, V> primary, Map<K, V> secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public int size() {
return (primary.size() >= secondary.size())
? countUnique(primary, secondary)
: countUnique(secondary, primary);
}
private int countUnique(Map<K, V> large, Map<K,V> small) {
int size = large.size();
for (K key : small.keySet()) {
if ( ! large.containsKey(key)) size++;
}
return size;
}
@Override
public boolean isEmpty() {
return primary.isEmpty() && secondary.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return primary.containsKey(key) || secondary.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return primary.containsValue(value) || secondary.containsValue(value);
}
@Override
public V get(Object key) {
V value = primary.get(key);
return value != null ? value : secondary.get(key);
}
@Override
public V put(K key, V value) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public Set<K> keySet() {
var keys = new HashSet<>(secondary.keySet());
keys.addAll(primary.keySet());
return keys;
}
@Override
@Override
public Set<Entry<K, V>> entrySet() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
} |
What about zstandard ? | private OutputStream compressedOutputStream(File outputFile) throws IOException {
log.log(Level.FINE, () -> "Compressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPOutputStream(new FileOutputStream(outputFile));
case lz4:
return new LZ4BlockOutputStream(new FileOutputStream(outputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileOutputStream(outputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
} | default: | private OutputStream compressedOutputStream(File outputFile) throws IOException {
log.log(Level.FINE, () -> "Compressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPOutputStream(new FileOutputStream(outputFile));
case lz4:
return new LZ4BlockOutputStream(new FileOutputStream(outputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileOutputStream(outputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
} | class FileReferenceCompressor {
private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName());
private static final int recurseDepth = 100;
private final FileReferenceData.Type type;
private final FileReferenceData.CompressionType compressionType;
public FileReferenceCompressor(FileReferenceData.Type type, FileReferenceData.CompressionType compressionType) {
this.type = Objects.requireNonNull(type, "Type cannot be null");
this.compressionType = Objects.requireNonNull(compressionType, "Compression type cannot be null");
}
public File compress(File baseDir, List<File> inputFiles, File outputFile) throws IOException {
TarArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream(compressedOutputStream(outputFile));
archiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
createArchiveFile(archiveOutputStream, baseDir, inputFiles);
return outputFile;
}
public File compress(File directory, File outputFile) throws IOException {
return compress(directory,
Files.find(Paths.get(directory.getAbsolutePath()),
recurseDepth,
(p, basicFileAttributes) -> basicFileAttributes.isRegularFile())
.map(Path::toFile).collect(Collectors.toList()),
outputFile);
}
public void decompress(File inputFile, File outputDir) throws IOException {
log.log(Level.FINE, () -> "Decompressing '" + inputFile + "' into '" + outputDir + "'");
try (ArchiveInputStream ais = new TarArchiveInputStream(decompressedInputStream(inputFile))) {
decompress(ais, outputDir);
} catch (IllegalArgumentException e) {
throw new RuntimeException("Unable to decompress '" + inputFile.getAbsolutePath() + "': " + e.getMessage());
}
}
private static void decompress(ArchiveInputStream archiveInputStream, File outputFile) throws IOException {
int entries = 0;
ArchiveEntry entry;
while ((entry = archiveInputStream.getNextEntry()) != null) {
File outFile = new File(outputFile, entry.getName());
if (entry.isDirectory()) {
if (!(outFile.exists() && outFile.isDirectory())) {
log.log(Level.FINE, () -> "Creating dir: " + outFile.getAbsolutePath());
if (!outFile.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + entry.getName());
}
}
} else {
File parent = new File(outFile.getParent());
if (!parent.exists() && !parent.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + parent.getAbsolutePath());
}
FileOutputStream fos = new FileOutputStream(outFile);
ByteStreams.copy(archiveInputStream, fos);
fos.close();
}
entries++;
}
if (entries == 0) {
throw new IllegalArgumentException("Not able to read any entries from stream (" +
archiveInputStream.getBytesRead() + " bytes read from stream)");
}
}
private static void createArchiveFile(ArchiveOutputStream archiveOutputStream, File baseDir, List<File> inputFiles) throws IOException {
inputFiles.forEach(file -> {
try {
writeFileToTar(archiveOutputStream, baseDir, file);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
archiveOutputStream.close();
}
private static void writeFileToTar(ArchiveOutputStream taos, File baseDir, File file) throws IOException {
log.log(Level.FINEST, () -> "Adding file to tar: " + baseDir.toPath().relativize(file.toPath()).toString());
taos.putArchiveEntry(taos.createArchiveEntry(file, baseDir.toPath().relativize(file.toPath()).toString()));
ByteStreams.copy(new FileInputStream(file), taos);
taos.closeArchiveEntry();
}
private InputStream decompressedInputStream(File inputFile) throws IOException {
log.log(Level.FINE, () -> "Decompressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPInputStream(new FileInputStream(inputFile));
case lz4:
return new LZ4BlockInputStream(new FileInputStream(inputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileInputStream(inputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
}
} | class FileReferenceCompressor {
private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName());
private static final int recurseDepth = 100;
private final FileReferenceData.Type type;
private final FileReferenceData.CompressionType compressionType;
public FileReferenceCompressor(FileReferenceData.Type type, FileReferenceData.CompressionType compressionType) {
this.type = Objects.requireNonNull(type, "Type cannot be null");
this.compressionType = Objects.requireNonNull(compressionType, "Compression type cannot be null");
}
public File compress(File baseDir, List<File> inputFiles, File outputFile) throws IOException {
TarArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream(compressedOutputStream(outputFile));
archiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
createArchiveFile(archiveOutputStream, baseDir, inputFiles);
return outputFile;
}
public File compress(File directory, File outputFile) throws IOException {
return compress(directory,
Files.find(Paths.get(directory.getAbsolutePath()),
recurseDepth,
(p, basicFileAttributes) -> basicFileAttributes.isRegularFile())
.map(Path::toFile).collect(Collectors.toList()),
outputFile);
}
public void decompress(File inputFile, File outputDir) throws IOException {
log.log(Level.FINE, () -> "Decompressing '" + inputFile + "' into '" + outputDir + "'");
try (ArchiveInputStream ais = new TarArchiveInputStream(decompressedInputStream(inputFile))) {
decompress(ais, outputDir);
} catch (IllegalArgumentException e) {
throw new RuntimeException("Unable to decompress '" + inputFile.getAbsolutePath() + "': " + e.getMessage());
}
}
private static void decompress(ArchiveInputStream archiveInputStream, File outputFile) throws IOException {
int entries = 0;
ArchiveEntry entry;
while ((entry = archiveInputStream.getNextEntry()) != null) {
File outFile = new File(outputFile, entry.getName());
if (entry.isDirectory()) {
if (!(outFile.exists() && outFile.isDirectory())) {
log.log(Level.FINE, () -> "Creating dir: " + outFile.getAbsolutePath());
if (!outFile.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + entry.getName());
}
}
} else {
File parent = new File(outFile.getParent());
if (!parent.exists() && !parent.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + parent.getAbsolutePath());
}
FileOutputStream fos = new FileOutputStream(outFile);
ByteStreams.copy(archiveInputStream, fos);
fos.close();
}
entries++;
}
if (entries == 0) {
throw new IllegalArgumentException("Not able to read any entries from stream (" +
archiveInputStream.getBytesRead() + " bytes read from stream)");
}
}
private static void createArchiveFile(ArchiveOutputStream archiveOutputStream, File baseDir, List<File> inputFiles) throws IOException {
inputFiles.forEach(file -> {
try {
writeFileToTar(archiveOutputStream, baseDir, file);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
archiveOutputStream.close();
}
private static void writeFileToTar(ArchiveOutputStream taos, File baseDir, File file) throws IOException {
log.log(Level.FINEST, () -> "Adding file to tar: " + baseDir.toPath().relativize(file.toPath()).toString());
taos.putArchiveEntry(taos.createArchiveEntry(file, baseDir.toPath().relativize(file.toPath()).toString()));
ByteStreams.copy(new FileInputStream(file), taos);
taos.closeArchiveEntry();
}
private InputStream decompressedInputStream(File inputFile) throws IOException {
log.log(Level.FINE, () -> "Decompressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPInputStream(new FileInputStream(inputFile));
case lz4:
return new LZ4BlockInputStream(new FileInputStream(inputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileInputStream(inputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
}
} |
output and input streams for zstandard not available yet, work in progress | private OutputStream compressedOutputStream(File outputFile) throws IOException {
log.log(Level.FINE, () -> "Compressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPOutputStream(new FileOutputStream(outputFile));
case lz4:
return new LZ4BlockOutputStream(new FileOutputStream(outputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileOutputStream(outputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
} | default: | private OutputStream compressedOutputStream(File outputFile) throws IOException {
log.log(Level.FINE, () -> "Compressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPOutputStream(new FileOutputStream(outputFile));
case lz4:
return new LZ4BlockOutputStream(new FileOutputStream(outputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileOutputStream(outputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
} | class FileReferenceCompressor {
private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName());
private static final int recurseDepth = 100;
private final FileReferenceData.Type type;
private final FileReferenceData.CompressionType compressionType;
public FileReferenceCompressor(FileReferenceData.Type type, FileReferenceData.CompressionType compressionType) {
this.type = Objects.requireNonNull(type, "Type cannot be null");
this.compressionType = Objects.requireNonNull(compressionType, "Compression type cannot be null");
}
public File compress(File baseDir, List<File> inputFiles, File outputFile) throws IOException {
TarArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream(compressedOutputStream(outputFile));
archiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
createArchiveFile(archiveOutputStream, baseDir, inputFiles);
return outputFile;
}
public File compress(File directory, File outputFile) throws IOException {
return compress(directory,
Files.find(Paths.get(directory.getAbsolutePath()),
recurseDepth,
(p, basicFileAttributes) -> basicFileAttributes.isRegularFile())
.map(Path::toFile).collect(Collectors.toList()),
outputFile);
}
public void decompress(File inputFile, File outputDir) throws IOException {
log.log(Level.FINE, () -> "Decompressing '" + inputFile + "' into '" + outputDir + "'");
try (ArchiveInputStream ais = new TarArchiveInputStream(decompressedInputStream(inputFile))) {
decompress(ais, outputDir);
} catch (IllegalArgumentException e) {
throw new RuntimeException("Unable to decompress '" + inputFile.getAbsolutePath() + "': " + e.getMessage());
}
}
private static void decompress(ArchiveInputStream archiveInputStream, File outputFile) throws IOException {
int entries = 0;
ArchiveEntry entry;
while ((entry = archiveInputStream.getNextEntry()) != null) {
File outFile = new File(outputFile, entry.getName());
if (entry.isDirectory()) {
if (!(outFile.exists() && outFile.isDirectory())) {
log.log(Level.FINE, () -> "Creating dir: " + outFile.getAbsolutePath());
if (!outFile.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + entry.getName());
}
}
} else {
File parent = new File(outFile.getParent());
if (!parent.exists() && !parent.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + parent.getAbsolutePath());
}
FileOutputStream fos = new FileOutputStream(outFile);
ByteStreams.copy(archiveInputStream, fos);
fos.close();
}
entries++;
}
if (entries == 0) {
throw new IllegalArgumentException("Not able to read any entries from stream (" +
archiveInputStream.getBytesRead() + " bytes read from stream)");
}
}
private static void createArchiveFile(ArchiveOutputStream archiveOutputStream, File baseDir, List<File> inputFiles) throws IOException {
inputFiles.forEach(file -> {
try {
writeFileToTar(archiveOutputStream, baseDir, file);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
archiveOutputStream.close();
}
private static void writeFileToTar(ArchiveOutputStream taos, File baseDir, File file) throws IOException {
log.log(Level.FINEST, () -> "Adding file to tar: " + baseDir.toPath().relativize(file.toPath()).toString());
taos.putArchiveEntry(taos.createArchiveEntry(file, baseDir.toPath().relativize(file.toPath()).toString()));
ByteStreams.copy(new FileInputStream(file), taos);
taos.closeArchiveEntry();
}
private InputStream decompressedInputStream(File inputFile) throws IOException {
log.log(Level.FINE, () -> "Decompressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPInputStream(new FileInputStream(inputFile));
case lz4:
return new LZ4BlockInputStream(new FileInputStream(inputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileInputStream(inputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
}
} | class FileReferenceCompressor {
private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName());
private static final int recurseDepth = 100;
private final FileReferenceData.Type type;
private final FileReferenceData.CompressionType compressionType;
public FileReferenceCompressor(FileReferenceData.Type type, FileReferenceData.CompressionType compressionType) {
this.type = Objects.requireNonNull(type, "Type cannot be null");
this.compressionType = Objects.requireNonNull(compressionType, "Compression type cannot be null");
}
public File compress(File baseDir, List<File> inputFiles, File outputFile) throws IOException {
TarArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream(compressedOutputStream(outputFile));
archiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
createArchiveFile(archiveOutputStream, baseDir, inputFiles);
return outputFile;
}
public File compress(File directory, File outputFile) throws IOException {
return compress(directory,
Files.find(Paths.get(directory.getAbsolutePath()),
recurseDepth,
(p, basicFileAttributes) -> basicFileAttributes.isRegularFile())
.map(Path::toFile).collect(Collectors.toList()),
outputFile);
}
public void decompress(File inputFile, File outputDir) throws IOException {
log.log(Level.FINE, () -> "Decompressing '" + inputFile + "' into '" + outputDir + "'");
try (ArchiveInputStream ais = new TarArchiveInputStream(decompressedInputStream(inputFile))) {
decompress(ais, outputDir);
} catch (IllegalArgumentException e) {
throw new RuntimeException("Unable to decompress '" + inputFile.getAbsolutePath() + "': " + e.getMessage());
}
}
private static void decompress(ArchiveInputStream archiveInputStream, File outputFile) throws IOException {
int entries = 0;
ArchiveEntry entry;
while ((entry = archiveInputStream.getNextEntry()) != null) {
File outFile = new File(outputFile, entry.getName());
if (entry.isDirectory()) {
if (!(outFile.exists() && outFile.isDirectory())) {
log.log(Level.FINE, () -> "Creating dir: " + outFile.getAbsolutePath());
if (!outFile.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + entry.getName());
}
}
} else {
File parent = new File(outFile.getParent());
if (!parent.exists() && !parent.mkdirs()) {
log.log(Level.WARNING, "Could not create dir " + parent.getAbsolutePath());
}
FileOutputStream fos = new FileOutputStream(outFile);
ByteStreams.copy(archiveInputStream, fos);
fos.close();
}
entries++;
}
if (entries == 0) {
throw new IllegalArgumentException("Not able to read any entries from stream (" +
archiveInputStream.getBytesRead() + " bytes read from stream)");
}
}
private static void createArchiveFile(ArchiveOutputStream archiveOutputStream, File baseDir, List<File> inputFiles) throws IOException {
inputFiles.forEach(file -> {
try {
writeFileToTar(archiveOutputStream, baseDir, file);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
archiveOutputStream.close();
}
private static void writeFileToTar(ArchiveOutputStream taos, File baseDir, File file) throws IOException {
log.log(Level.FINEST, () -> "Adding file to tar: " + baseDir.toPath().relativize(file.toPath()).toString());
taos.putArchiveEntry(taos.createArchiveEntry(file, baseDir.toPath().relativize(file.toPath()).toString()));
ByteStreams.copy(new FileInputStream(file), taos);
taos.closeArchiveEntry();
}
private InputStream decompressedInputStream(File inputFile) throws IOException {
log.log(Level.FINE, () -> "Decompressing with type " + type + " and compression type " + compressionType);
switch (type) {
case compressed:
switch (compressionType) {
case gzip:
return new GZIPInputStream(new FileInputStream(inputFile));
case lz4:
return new LZ4BlockInputStream(new FileInputStream(inputFile));
default:
throw new RuntimeException("Unknown compression type " + compressionType);
}
case file:
return new FileInputStream(inputFile);
default:
throw new RuntimeException("Unknown file reference type " + type);
}
}
} |
```suggestion return 1 << 25; // 32MB ``` | public long maxPendingBytes() {
return 1 << 25;
} | return 1 << 25; | public long maxPendingBytes() {
return 1 << 25;
} | class ProxyResponse extends HttpResponse {
private final CloseableHttpResponse clientResponse;
ProxyResponse(CloseableHttpResponse clientResponse) {
super(clientResponse.getCode());
this.clientResponse = clientResponse;
}
@Override
public String getContentType() {
return Optional.ofNullable(clientResponse.getFirstHeader("Content-Type"))
.map(NameValuePair::getValue)
.orElseGet(super::getContentType);
}
@Override
public void render(OutputStream outputStream) throws IOException {
try (clientResponse) {
clientResponse.getEntity().writeTo(outputStream);
}
}
@Override
} | class ProxyResponse extends HttpResponse {
private final CloseableHttpResponse clientResponse;
ProxyResponse(CloseableHttpResponse clientResponse) {
super(clientResponse.getCode());
this.clientResponse = clientResponse;
}
@Override
public String getContentType() {
return Optional.ofNullable(clientResponse.getFirstHeader("Content-Type"))
.map(NameValuePair::getValue)
.orElseGet(super::getContentType);
}
@Override
public void render(OutputStream outputStream) throws IOException {
try (clientResponse) {
clientResponse.getEntity().writeTo(outputStream);
}
}
@Override
} |
```suggestion return 1 << 25; // 32MB ``` | static void render(OutputStream out, Collection<LogRecord> log) throws IOException {
out.write("{\"logRecords\":[".getBytes(UTF_8));
boolean first = true;
for (LogRecord record : log) {
String message = record.getMessage() == null ? "" : record.getMessage();
if (record.getThrown() != null) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
record.getThrown().printStackTrace(new PrintStream(buffer));
message += (message.isEmpty() ? "" : "\n") + buffer;
}
if (first) first = false;
else out.write(',');
out.write("""
{"id":%d,"at":%d,"type":"%s","message":"%s"}
""".formatted(record.getSequenceNumber(),
record.getMillis(),
typeOf(record.getLevel()),
message).getBytes(UTF_8));
}
out.write(']');
}
public static String typeOf(Level level) {
return level.getName().equals("html") ? "html"
: level.intValue() < Level.INFO.intValue() ? "debug"
: level.intValue() < Level.WARNING.intValue() ? "info"
: level.intValue() < Level.SEVERE.intValue() ? "warning"
: "error";
}
static void render(OutputStream out, TestReport report) throws IOException {
out.write('{');
out.write("\"report\":".getBytes(UTF_8));
render(out, (Node) report.root());
out.write(",\"summary\":{".getBytes(UTF_8));
renderSummary(out, report);
out.write(",\"failures\":[".getBytes(UTF_8));
renderFailures(out, report.root(), true);
out.write("]".getBytes(UTF_8));
out.write("}".getBytes(UTF_8));
out.write(",\"output\":[".getBytes(UTF_8));
renderOutput(out, report.root(), true);
out.write("]".getBytes(UTF_8));
out.write('}');
}
static void renderSummary(OutputStream out, TestReport report) throws IOException {
Map<TestReport.Status, Long> tally = report.root().tally();
out.write("""
"success":%d,"failed":%d,"ignored":%d,"aborted":%d,"inconclusive":%d
""".formatted(tally.getOrDefault(TestReport.Status.successful, 0L),
tally.getOrDefault(TestReport.Status.failed, 0L) + tally.getOrDefault(TestReport.Status.error, 0L),
tally.getOrDefault(TestReport.Status.skipped, 0L),
tally.getOrDefault(TestReport.Status.aborted, 0L),
tally.getOrDefault(TestReport.Status.inconclusive, 0L)).getBytes(UTF_8));
}
static boolean renderFailures(OutputStream out, Node node, boolean first) throws IOException {
if (node instanceof FailureNode) {
if (first) first = false;
else out.write(',');
String message = ((FailureNode) node).thrown().getMessage();
out.write("""
{"testName":"%s","testError":%s,"exception":"%s"}
""".formatted(node.parent.name(),
message == null ? null : '"' + message + '"',
ExceptionUtils.getStackTraceAsString(((FailureNode) node).thrown())).getBytes(UTF_8));
}
else {
for (Node child : node.children())
first = renderFailures(out, child, first);
}
return first;
}
static boolean renderOutput(OutputStream out, Node node, boolean first) throws IOException {
if (node instanceof OutputNode) {
for (LogRecord record : ((OutputNode) node).log())
if (record.getMessage() != null) {
if (first) first = false;
else out.write(',');
out.write(('"' + formatter.format(record.getInstant().atOffset(ZoneOffset.UTC)) + " " + record.getMessage() + '"').getBytes(UTF_8));
}
}
else {
for (Node child : node.children())
first = renderOutput(out, child, first);
}
return first;
}
static void render(OutputStream out, Node node) throws IOException {
out.write('{');
if (node instanceof NamedNode) render(out, (NamedNode) node);
if (node instanceof OutputNode) render(out, (OutputNode) node);
if ( ! node.children().isEmpty()) {
out.write(",\"children\":[".getBytes(UTF_8));
boolean first = true;
for (Node child : node.children) {
if (first) first = false;
else out.write(',');
render(out, child);
}
out.write(']');
}
out.write('}');
}
static void render(OutputStream out, NamedNode node) throws IOException {
String type = node instanceof FailureNode ? "failure" : node instanceof TestNode ? "test" : "container";
out.write("""
"type":"%s","name":"%s","status":"%s","start":%d,"duration":%d
""".formatted(type,node.name(), node.status().name(), node.start().toEpochMilli(), node.duration().toMillis()).getBytes(UTF_8));
}
static void render(OutputStream out, OutputNode node) throws IOException {
out.write("\"type\":\"output\",\"children\":[".getBytes(UTF_8));
boolean first = true;
for (LogRecord record : node.log()) {
if (first) first = false;
else out.write(',');
out.write("""
{"message":"%s","at":%d,"level":"%s"}
""".formatted((record.getLoggerName() == null ? "" : record.getLoggerName() + ": ") +
(record.getMessage() != null ? record.getMessage() : "") +
(record.getThrown() != null ? (record.getMessage() != null ? "\n" : "") + traceToString(record.getThrown()) : ""),
record.getInstant().toEpochMilli(),
typeOf(record.getLevel())).getBytes(UTF_8));
}
out.write(']');
}
private static String traceToString(Throwable thrown) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(buffer));
return buffer.toString(UTF_8);
}
interface Renderer {
void render(OutputStream out) throws IOException;
}
static class CustomJsonResponse extends HttpResponse {
private final Renderer renderer;
CustomJsonResponse(Renderer renderer) {
super(200);
this.renderer = renderer;
}
@Override
public void render(OutputStream outputStream) throws IOException {
renderer.render(outputStream);
}
@Override
public String getContentType() {
return "application/json";
}
@Override
public long maxPendingBytes() {
return 1 << 25;
}
}
} | return 1 << 25; | static void render(OutputStream out, Collection<LogRecord> log) throws IOException {
out.write("{\"logRecords\":[".getBytes(UTF_8));
boolean first = true;
for (LogRecord record : log) {
String message = record.getMessage() == null ? "" : record.getMessage();
if (record.getThrown() != null) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
record.getThrown().printStackTrace(new PrintStream(buffer));
message += (message.isEmpty() ? "" : "\n") + buffer;
}
if (first) first = false;
else out.write(',');
out.write("""
{"id":%d,"at":%d,"type":"%s","message":"%s"}
""".formatted(record.getSequenceNumber(),
record.getMillis(),
typeOf(record.getLevel()),
message).getBytes(UTF_8));
}
out.write("]}".getBytes(UTF_8));
} | class TestRunnerHandler extends ThreadedHttpRequestHandler {
private static final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss.SSS");
private final TestRunner testRunner;
@Inject
public TestRunnerHandler(Executor executor, ComponentRegistry<TestRunner> testRunners) {
this(executor, AggregateTestRunner.of(testRunners.allComponents()));
}
TestRunnerHandler(Executor executor, TestRunner testRunner) {
super(executor);
this.testRunner = testRunner;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case POST: return handlePOST(request);
default: return new MessageResponse(Status.METHOD_NOT_ALLOWED, "Method '" + request.getMethod() + "' is not supported");
}
} catch (IllegalArgumentException e) {
return new MessageResponse(Status.BAD_REQUEST, Exceptions.toMessageString(e));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return new MessageResponse(Status.INTERNAL_SERVER_ERROR, Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
switch (path) {
case "/tester/v1/log":
long fetchRecordsAfter = Optional.ofNullable(request.getProperty("after"))
.map(Long::parseLong)
.orElse(-1L);
return new CustomJsonResponse(out -> render(out, testRunner.getLog(fetchRecordsAfter)));
case "/tester/v1/status":
return new MessageResponse(testRunner.getStatus().name());
case "/tester/v1/report":
TestReport report = testRunner.getReport();
if (report == null) return new EmptyResponse(204);
else return new CustomJsonResponse(out -> render(out, report));
}
return new MessageResponse(Status.NOT_FOUND, "Not found: " + request.getUri().getPath());
}
private HttpResponse handlePOST(HttpRequest request) throws IOException {
final String path = request.getUri().getPath();
if (path.startsWith("/tester/v1/run/")) {
String type = lastElement(path);
TestRunner.Suite testSuite = TestRunner.Suite.valueOf(type.toUpperCase() + "_TEST");
byte[] config = request.getData().readAllBytes();
testRunner.test(testSuite, config);
log.info("Started tests of type " + type + " and status is " + testRunner.getStatus());
return new MessageResponse("Successfully started " + type + " tests");
}
return new MessageResponse(Status.NOT_FOUND, "Not found: " + request.getUri().getPath());
}
private static String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash + 1);
} | class TestRunnerHandler extends ThreadedHttpRequestHandler {
private static final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss.SSS");
private final TestRunner testRunner;
@Inject
public TestRunnerHandler(Executor executor, ComponentRegistry<TestRunner> testRunners) {
this(executor, AggregateTestRunner.of(testRunners.allComponents()));
}
TestRunnerHandler(Executor executor, TestRunner testRunner) {
super(executor);
this.testRunner = testRunner;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case POST: return handlePOST(request);
default: return new MessageResponse(Status.METHOD_NOT_ALLOWED, "Method '" + request.getMethod() + "' is not supported");
}
} catch (IllegalArgumentException e) {
return new MessageResponse(Status.BAD_REQUEST, Exceptions.toMessageString(e));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return new MessageResponse(Status.INTERNAL_SERVER_ERROR, Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
switch (path) {
case "/tester/v1/log":
long fetchRecordsAfter = Optional.ofNullable(request.getProperty("after"))
.map(Long::parseLong)
.orElse(-1L);
return new CustomJsonResponse(out -> render(out, testRunner.getLog(fetchRecordsAfter)));
case "/tester/v1/status":
return new MessageResponse(testRunner.getStatus().name());
case "/tester/v1/report":
TestReport report = testRunner.getReport();
if (report == null) return new EmptyResponse(204);
else return new CustomJsonResponse(out -> render(out, report));
}
return new MessageResponse(Status.NOT_FOUND, "Not found: " + request.getUri().getPath());
}
private HttpResponse handlePOST(HttpRequest request) throws IOException {
final String path = request.getUri().getPath();
if (path.startsWith("/tester/v1/run/")) {
String type = lastElement(path);
TestRunner.Suite testSuite = TestRunner.Suite.valueOf(type.toUpperCase() + "_TEST");
byte[] config = request.getData().readAllBytes();
testRunner.test(testSuite, config);
log.info("Started tests of type " + type + " and status is " + testRunner.getStatus());
return new MessageResponse("Successfully started " + type + " tests");
}
return new MessageResponse(Status.NOT_FOUND, "Not found: " + request.getUri().getPath());
}
private static String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash + 1);
}
public static String typeOf(Level level) {
return level.getName().equals("html") ? "html"
: level.intValue() < Level.INFO.intValue() ? "debug"
: level.intValue() < Level.WARNING.intValue() ? "info"
: level.intValue() < Level.SEVERE.intValue() ? "warning"
: "error";
}
static void render(OutputStream out, TestReport report) throws IOException {
out.write('{');
out.write("\"report\":".getBytes(UTF_8));
render(out, (Node) report.root());
out.write(",\"summary\":{".getBytes(UTF_8));
renderSummary(out, report);
out.write(",\"failures\":[".getBytes(UTF_8));
renderFailures(out, report.root(), true);
out.write("]".getBytes(UTF_8));
out.write("}".getBytes(UTF_8));
out.write(",\"output\":[".getBytes(UTF_8));
renderOutput(out, report.root(), true);
out.write("]".getBytes(UTF_8));
out.write('}');
}
static void renderSummary(OutputStream out, TestReport report) throws IOException {
Map<TestReport.Status, Long> tally = report.root().tally();
out.write("""
"success":%d,"failed":%d,"ignored":%d,"aborted":%d,"inconclusive":%d
""".formatted(tally.getOrDefault(TestReport.Status.successful, 0L),
tally.getOrDefault(TestReport.Status.failed, 0L) + tally.getOrDefault(TestReport.Status.error, 0L),
tally.getOrDefault(TestReport.Status.skipped, 0L),
tally.getOrDefault(TestReport.Status.aborted, 0L),
tally.getOrDefault(TestReport.Status.inconclusive, 0L)).getBytes(UTF_8));
}
static boolean renderFailures(OutputStream out, Node node, boolean first) throws IOException {
if (node instanceof FailureNode) {
if (first) first = false;
else out.write(',');
String message = ((FailureNode) node).thrown().getMessage();
out.write("""
{"testName":"%s","testError":%s,"exception":"%s"}
""".formatted(node.parent.name(),
message == null ? null : '"' + message + '"',
ExceptionUtils.getStackTraceAsString(((FailureNode) node).thrown())).getBytes(UTF_8));
}
else {
for (Node child : node.children())
first = renderFailures(out, child, first);
}
return first;
}
static boolean renderOutput(OutputStream out, Node node, boolean first) throws IOException {
if (node instanceof OutputNode) {
for (LogRecord record : ((OutputNode) node).log())
if (record.getMessage() != null) {
if (first) first = false;
else out.write(',');
out.write(('"' + formatter.format(record.getInstant().atOffset(ZoneOffset.UTC)) + " " + record.getMessage() + '"').getBytes(UTF_8));
}
}
else {
for (Node child : node.children())
first = renderOutput(out, child, first);
}
return first;
}
static void render(OutputStream out, Node node) throws IOException {
out.write('{');
if (node instanceof NamedNode) render(out, (NamedNode) node);
if (node instanceof OutputNode) render(out, (OutputNode) node);
if ( ! node.children().isEmpty()) {
out.write(",\"children\":[".getBytes(UTF_8));
boolean first = true;
for (Node child : node.children) {
if (first) first = false;
else out.write(',');
render(out, child);
}
out.write(']');
}
out.write('}');
}
static void render(OutputStream out, NamedNode node) throws IOException {
String type = node instanceof FailureNode ? "failure" : node instanceof TestNode ? "test" : "container";
out.write("""
"type":"%s","name":"%s","status":"%s","start":%d,"duration":%d
""".formatted(type,node.name(), node.status().name(), node.start().toEpochMilli(), node.duration().toMillis()).getBytes(UTF_8));
}
static void render(OutputStream out, OutputNode node) throws IOException {
out.write("\"type\":\"output\",\"children\":[".getBytes(UTF_8));
boolean first = true;
for (LogRecord record : node.log()) {
if (first) first = false;
else out.write(',');
out.write("""
{"message":"%s","at":%d,"level":"%s"}
""".formatted((record.getLoggerName() == null ? "" : record.getLoggerName() + ": ") +
(record.getMessage() != null ? record.getMessage() : "") +
(record.getThrown() != null ? (record.getMessage() != null ? "\n" : "") + traceToString(record.getThrown()) : ""),
record.getInstant().toEpochMilli(),
typeOf(record.getLevel())).getBytes(UTF_8));
}
out.write(']');
}
private static String traceToString(Throwable thrown) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(buffer));
return buffer.toString(UTF_8);
}
interface Renderer {
void render(OutputStream out) throws IOException;
}
static class CustomJsonResponse extends HttpResponse {
private final Renderer renderer;
CustomJsonResponse(Renderer renderer) {
super(200);
this.renderer = renderer;
}
@Override
public void render(OutputStream outputStream) throws IOException {
renderer.render(outputStream);
}
@Override
public String getContentType() {
return "application/json";
}
@Override
public long maxPendingBytes() {
return 1 << 25;
}
}
} |
Might be better to explicitly iterate over statuses, so we properly report 0 instead of no data | private void updateMetrics() {
curator.readChangeRequests()
.stream()
.collect(Collectors.groupingBy(VespaChangeRequest::getStatus))
.forEach((status, cmrs) ->
metric.set(TRACKED_CMRS_METRIC, cmrs.size(), metric.createContext(Map.of("status", status.name())))
);
} | metric.set(TRACKED_CMRS_METRIC, cmrs.size(), metric.createContext(Map.of("status", status.name()))) | private void updateMetrics() {
var cmrsByStatus = curator.readChangeRequests()
.stream()
.collect(Collectors.groupingBy(VespaChangeRequest::getStatus));
for (var status : Status.values()) {
var count = cmrsByStatus.getOrDefault(status, List.of()).size();
metric.set(TRACKED_CMRS_METRIC, count, metric.createContext(Map.of("status", status.name())));
}
} | class VcmrMaintainer extends ControllerMaintainer {
private static final Logger LOG = Logger.getLogger(VcmrMaintainer.class.getName());
private static final int DAYS_TO_RETIRE = 2;
private static final Duration ALLOWED_POSTPONEMENT_TIME = Duration.ofDays(7);
protected static final String TRACKED_CMRS_METRIC = "cmr.tracked";
private final CuratorDb curator;
private final NodeRepository nodeRepository;
private final ChangeRequestClient changeRequestClient;
private final SystemName system;
private final Metric metric;
public VcmrMaintainer(Controller controller, Duration interval, Metric metric) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.system = controller.system();
this.metric = metric;
}
@Override
protected double maintain() {
var changeRequests = curator.readChangeRequests()
.stream()
.filter(shouldUpdate()).toList();
var nodesByZone = nodesByZone();
changeRequests.forEach(changeRequest -> {
var nodes = impactedNodes(nodesByZone, changeRequest);
var nextActions = getNextActions(nodes, changeRequest);
var status = getStatus(nextActions, changeRequest);
try (var lock = curator.lockChangeRequests()) {
curator.readChangeRequest(changeRequest.getId())
.ifPresent(vcmr -> {
var updatedVcmr = vcmr.withActionPlan(nextActions)
.withStatus(status);
curator.writeChangeRequest(updatedVcmr);
if (nodes.keySet().size() == 1)
approveChangeRequest(updatedVcmr);
});
}
});
updateMetrics();
return 1.0;
}
/**
* Status is based on:
* 1. Whether the source has reportedly closed the request
* 2. Whether any host requires operator action
* 3. Whether any host is pending/started/finished retirement
*/
private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) {
if (changeRequest.getChangeRequestSource().isClosed()) {
return Status.COMPLETED;
}
var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting()));
if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) {
return Status.REQUIRES_OPERATOR_ACTION;
}
if (byActionState.getOrDefault(State.OUT_OF_SYNC, 0L) > 0) {
return Status.OUT_OF_SYNC;
}
if (byActionState.getOrDefault(State.RETIRING, 0L) > 0) {
return Status.IN_PROGRESS;
}
if (Set.of(State.RETIRED, State.NONE).containsAll(byActionState.keySet())) {
return Status.READY;
}
if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) {
return Status.PENDING_ACTION;
}
return Status.NOOP;
}
private List<HostAction> getNextActions(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) {
return nodesByZone.entrySet()
.stream()
.flatMap(entry -> {
var zone = entry.getKey();
var nodes = entry.getValue();
if (nodes.isEmpty()) {
return Stream.empty();
}
var spareCapacity = hasSpareCapacity(zone, nodes);
return nodes.stream().map(node -> nextAction(zone, node, changeRequest, spareCapacity));
}).collect(Collectors.toList());
}
private Map<ZoneId, List<Node>> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) {
return nodesByZone.entrySet()
.stream()
.filter(entry -> entry.getValue().stream().anyMatch(isImpacted(changeRequest)))
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> entry.getValue().stream().filter(isImpacted(changeRequest)).collect(Collectors.toList())
));
}
private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) {
return changeRequest.getHostActionPlan()
.stream()
.filter(hostAction -> hostAction.getHostname().equals(node.hostname().value()))
.findFirst();
}
private HostAction nextAction(ZoneId zoneId, Node node, VespaChangeRequest changeRequest, boolean spareCapacity) {
var hostAction = getPreviousAction(node, changeRequest)
.orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now()));
if (changeRequest.getChangeRequestSource().isClosed()) {
LOG.fine(() -> changeRequest.getChangeRequestSource().getId() + " is closed, recycling " + node.hostname());
recycleNode(zoneId, node, hostAction);
removeReport(zoneId, changeRequest, node);
return hostAction.withState(State.COMPLETE);
}
if (isLowImpact(changeRequest))
return hostAction;
addReport(zoneId, changeRequest, node);
if (isOutOfSync(node, hostAction))
return hostAction.withState(State.OUT_OF_SYNC);
if (isPostponed(changeRequest, hostAction)) {
LOG.fine(() -> changeRequest.getChangeRequestSource().getId() + " is postponed, recycling " + node.hostname());
recycleNode(zoneId, node, hostAction);
return hostAction.withState(State.PENDING_RETIREMENT);
}
if (node.type() != NodeType.host || !spareCapacity) {
return hostAction.withState(State.REQUIRES_OPERATOR_ACTION);
}
if (shouldRetire(changeRequest, hostAction)) {
if (!node.wantToRetire()) {
LOG.info(Text.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId()));
try {
setWantToRetire(zoneId, node, true);
} catch (Exception e) {
LOG.warning("Failed to retire host " + node.hostname() + ": " + Exceptions.toMessageString(e));
if (!nodeRepository.getNode(zoneId, node.hostname().value()).wantToRetire()) {
return hostAction;
}
}
}
return hostAction.withState(State.RETIRING);
}
if (hasRetired(node, hostAction)) {
LOG.fine(() -> node.hostname() + " has retired");
return hostAction.withState(State.RETIRED);
}
if (pendingRetirement(node, hostAction)) {
LOG.fine(() -> node.hostname() + " is pending retirement");
return hostAction.withState(State.PENDING_RETIREMENT);
}
if (isFailed(node)) {
return hostAction.withState(State.NONE);
}
return hostAction;
}
private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) {
if (hostAction.getState() == State.RETIRED &&
node.state() == Node.State.parked) {
LOG.info("Setting " + node.hostname() + " to dirty");
nodeRepository.setState(zoneId, Node.State.dirty, node.hostname().value());
}
if (hostAction.getState() == State.RETIRING && node.wantToRetire()) {
try {
setWantToRetire(zoneId, node, false);
} catch (Exception ignored) {}
}
}
private boolean isPostponed(VespaChangeRequest changeRequest, HostAction action) {
return List.of(State.RETIRED, State.RETIRING).contains(action.getState()) &&
changeRequest.getChangeRequestSource().getPlannedStartTime()
.minus(ALLOWED_POSTPONEMENT_TIME)
.isAfter(ZonedDateTime.now());
}
private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) {
return action.getState() == State.PENDING_RETIREMENT &&
getRetirementStartTime(changeRequest.getChangeRequestSource().getPlannedStartTime())
.isBefore(ZonedDateTime.now());
}
private boolean hasRetired(Node node, HostAction hostAction) {
return List.of(State.RETIRING, State.REQUIRES_OPERATOR_ACTION).contains(hostAction.getState()) &&
node.state() == Node.State.parked;
}
private boolean pendingRetirement(Node node, HostAction action) {
return List.of(State.NONE, State.REQUIRES_OPERATOR_ACTION).contains(action.getState())
&& node.state() == Node.State.active;
}
private boolean isOutOfSync(Node node, HostAction action) {
return action.getState() == State.RETIRED && node.state() != Node.State.parked ||
action.getState() == State.RETIRING && !node.wantToRetire();
}
private boolean isFailed(Node node) {
return node.state() == Node.State.failed ||
node.state() == Node.State.breakfixed;
}
private Map<ZoneId, List<Node>> nodesByZone() {
return controller().zoneRegistry()
.zones()
.reachable()
.in(Environment.prod)
.ids()
.stream()
.collect(Collectors.toMap(
zone -> zone,
zone -> nodeRepository.list(zone, NodeFilter.all())
));
}
private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) {
return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) ||
node.switchHostname()
.map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname))
.orElse(false);
}
private Predicate<VespaChangeRequest> shouldUpdate() {
return changeRequest -> changeRequest.getStatus() != Status.COMPLETED;
}
private boolean isLowImpact(VespaChangeRequest changeRequest) {
return !List.of(Impact.HIGH, Impact.VERY_HIGH)
.contains(changeRequest.getImpact());
}
private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) {
var tenantHosts = nodes.stream()
.filter(node -> node.type() == NodeType.host)
.map(Node::hostname)
.collect(Collectors.toList());
return tenantHosts.isEmpty() ||
nodeRepository.isReplaceable(zoneId, tenantHosts);
}
private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) {
nodeRepository.retire(zoneId, node.hostname().value(), wantToRetire, false);
}
private void approveChangeRequest(VespaChangeRequest changeRequest) {
if (!system.equals(SystemName.main))
return;
if (changeRequest.getStatus() == Status.REQUIRES_OPERATOR_ACTION)
return;
if (changeRequest.getApproval() != ChangeRequest.Approval.REQUESTED)
return;
LOG.info("Approving " + changeRequest.getChangeRequestSource().getId());
changeRequestClient.approveChangeRequest(changeRequest);
}
private void removeReport(ZoneId zoneId, VespaChangeRequest changeRequest, Node node) {
var report = VcmrReport.fromReports(node.reports());
if (report.removeVcmr(changeRequest.getChangeRequestSource().getId())) {
updateReport(zoneId, node, report);
}
}
private void addReport(ZoneId zoneId, VespaChangeRequest changeRequest, Node node) {
var report = VcmrReport.fromReports(node.reports());
var source = changeRequest.getChangeRequestSource();
if (report.addVcmr(source.getId(), source.getPlannedStartTime(), source.getPlannedEndTime())) {
updateReport(zoneId, node, report);
}
}
private void updateReport(ZoneId zoneId, Node node, VcmrReport report) {
LOG.fine(() -> Text.format("Updating report for %s: %s", node.hostname(), report));
nodeRepository.updateReports(zoneId, node.hostname().value(), report.toNodeReports());
}
protected ZonedDateTime getRetirementStartTime(ZonedDateTime plannedStartTime) {
var time = plannedStartTime;
var days = 0;
while (days < DAYS_TO_RETIRE) {
time = time.minusDays(1);
if (time.getDayOfWeek().getValue() < 6) days++;
}
return time;
}
} | class VcmrMaintainer extends ControllerMaintainer {
private static final Logger LOG = Logger.getLogger(VcmrMaintainer.class.getName());
private static final int DAYS_TO_RETIRE = 2;
private static final Duration ALLOWED_POSTPONEMENT_TIME = Duration.ofDays(7);
protected static final String TRACKED_CMRS_METRIC = "cmr.tracked";
private final CuratorDb curator;
private final NodeRepository nodeRepository;
private final ChangeRequestClient changeRequestClient;
private final SystemName system;
private final Metric metric;
public VcmrMaintainer(Controller controller, Duration interval, Metric metric) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.system = controller.system();
this.metric = metric;
}
@Override
protected double maintain() {
var changeRequests = curator.readChangeRequests()
.stream()
.filter(shouldUpdate()).toList();
var nodesByZone = nodesByZone();
changeRequests.forEach(changeRequest -> {
var nodes = impactedNodes(nodesByZone, changeRequest);
var nextActions = getNextActions(nodes, changeRequest);
var status = getStatus(nextActions, changeRequest);
try (var lock = curator.lockChangeRequests()) {
curator.readChangeRequest(changeRequest.getId())
.ifPresent(vcmr -> {
var updatedVcmr = vcmr.withActionPlan(nextActions)
.withStatus(status);
curator.writeChangeRequest(updatedVcmr);
if (nodes.keySet().size() == 1)
approveChangeRequest(updatedVcmr);
});
}
});
updateMetrics();
return 1.0;
}
/**
* Status is based on:
* 1. Whether the source has reportedly closed the request
* 2. Whether any host requires operator action
* 3. Whether any host is pending/started/finished retirement
*/
private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) {
if (changeRequest.getChangeRequestSource().isClosed()) {
return Status.COMPLETED;
}
var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting()));
if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) {
return Status.REQUIRES_OPERATOR_ACTION;
}
if (byActionState.getOrDefault(State.OUT_OF_SYNC, 0L) > 0) {
return Status.OUT_OF_SYNC;
}
if (byActionState.getOrDefault(State.RETIRING, 0L) > 0) {
return Status.IN_PROGRESS;
}
if (Set.of(State.RETIRED, State.NONE).containsAll(byActionState.keySet())) {
return Status.READY;
}
if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) {
return Status.PENDING_ACTION;
}
return Status.NOOP;
}
private List<HostAction> getNextActions(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) {
return nodesByZone.entrySet()
.stream()
.flatMap(entry -> {
var zone = entry.getKey();
var nodes = entry.getValue();
if (nodes.isEmpty()) {
return Stream.empty();
}
var spareCapacity = hasSpareCapacity(zone, nodes);
return nodes.stream().map(node -> nextAction(zone, node, changeRequest, spareCapacity));
}).collect(Collectors.toList());
}
private Map<ZoneId, List<Node>> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) {
return nodesByZone.entrySet()
.stream()
.filter(entry -> entry.getValue().stream().anyMatch(isImpacted(changeRequest)))
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> entry.getValue().stream().filter(isImpacted(changeRequest)).collect(Collectors.toList())
));
}
private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) {
return changeRequest.getHostActionPlan()
.stream()
.filter(hostAction -> hostAction.getHostname().equals(node.hostname().value()))
.findFirst();
}
private HostAction nextAction(ZoneId zoneId, Node node, VespaChangeRequest changeRequest, boolean spareCapacity) {
var hostAction = getPreviousAction(node, changeRequest)
.orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now()));
if (changeRequest.getChangeRequestSource().isClosed()) {
LOG.fine(() -> changeRequest.getChangeRequestSource().getId() + " is closed, recycling " + node.hostname());
recycleNode(zoneId, node, hostAction);
removeReport(zoneId, changeRequest, node);
return hostAction.withState(State.COMPLETE);
}
if (isLowImpact(changeRequest))
return hostAction;
addReport(zoneId, changeRequest, node);
if (isOutOfSync(node, hostAction))
return hostAction.withState(State.OUT_OF_SYNC);
if (isPostponed(changeRequest, hostAction)) {
LOG.fine(() -> changeRequest.getChangeRequestSource().getId() + " is postponed, recycling " + node.hostname());
recycleNode(zoneId, node, hostAction);
return hostAction.withState(State.PENDING_RETIREMENT);
}
if (node.type() != NodeType.host || !spareCapacity) {
return hostAction.withState(State.REQUIRES_OPERATOR_ACTION);
}
if (shouldRetire(changeRequest, hostAction)) {
if (!node.wantToRetire()) {
LOG.info(Text.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId()));
try {
setWantToRetire(zoneId, node, true);
} catch (Exception e) {
LOG.warning("Failed to retire host " + node.hostname() + ": " + Exceptions.toMessageString(e));
if (!nodeRepository.getNode(zoneId, node.hostname().value()).wantToRetire()) {
return hostAction;
}
}
}
return hostAction.withState(State.RETIRING);
}
if (hasRetired(node, hostAction)) {
LOG.fine(() -> node.hostname() + " has retired");
return hostAction.withState(State.RETIRED);
}
if (pendingRetirement(node, hostAction)) {
LOG.fine(() -> node.hostname() + " is pending retirement");
return hostAction.withState(State.PENDING_RETIREMENT);
}
if (isFailed(node)) {
return hostAction.withState(State.NONE);
}
return hostAction;
}
private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) {
if (hostAction.getState() == State.RETIRED &&
node.state() == Node.State.parked) {
LOG.info("Setting " + node.hostname() + " to dirty");
nodeRepository.setState(zoneId, Node.State.dirty, node.hostname().value());
}
if (hostAction.getState() == State.RETIRING && node.wantToRetire()) {
try {
setWantToRetire(zoneId, node, false);
} catch (Exception ignored) {}
}
}
private boolean isPostponed(VespaChangeRequest changeRequest, HostAction action) {
return List.of(State.RETIRED, State.RETIRING).contains(action.getState()) &&
changeRequest.getChangeRequestSource().getPlannedStartTime()
.minus(ALLOWED_POSTPONEMENT_TIME)
.isAfter(ZonedDateTime.now());
}
private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) {
return action.getState() == State.PENDING_RETIREMENT &&
getRetirementStartTime(changeRequest.getChangeRequestSource().getPlannedStartTime())
.isBefore(ZonedDateTime.now());
}
private boolean hasRetired(Node node, HostAction hostAction) {
return List.of(State.RETIRING, State.REQUIRES_OPERATOR_ACTION).contains(hostAction.getState()) &&
node.state() == Node.State.parked;
}
private boolean pendingRetirement(Node node, HostAction action) {
return List.of(State.NONE, State.REQUIRES_OPERATOR_ACTION).contains(action.getState())
&& node.state() == Node.State.active;
}
/** The recorded action disagrees with the node repo: retired but not parked, or retiring without the retire flag. */
private boolean isOutOfSync(Node node, HostAction action) {
    boolean retiredButNotParked = action.getState() == State.RETIRED && node.state() != Node.State.parked;
    boolean retiringButNotFlagged = action.getState() == State.RETIRING && !node.wantToRetire();
    return retiredButNotParked || retiringButNotFlagged;
}
/** Returns whether the node is in a failure state (failed or breakfixed). */
private boolean isFailed(Node node) {
    Node.State state = node.state();
    return state == Node.State.failed || state == Node.State.breakfixed;
}
/** Lists all nodes in every reachable prod zone, keyed by zone. */
private Map<ZoneId, List<Node>> nodesByZone() {
    var prodZones = controller().zoneRegistry()
                                .zones()
                                .reachable()
                                .in(Environment.prod)
                                .ids();
    return prodZones.stream()
                    .collect(Collectors.toMap(zone -> zone,
                                              zone -> nodeRepository.list(zone, NodeFilter.all())));
}
/** Predicate matching nodes impacted by the change request, either directly by hostname or via an impacted switch. */
private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) {
    return node -> {
        if (changeRequest.getImpactedHosts().contains(node.hostname().value())) return true;
        return node.switchHostname()
                   .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname))
                   .orElse(false);
    };
}
/** Predicate matching change requests that still need processing, i.e. everything not yet COMPLETED. */
private Predicate<VespaChangeRequest> shouldUpdate() {
    return changeRequest -> changeRequest.getStatus() != Status.COMPLETED;
}
/** Returns whether the change request's impact is below the HIGH threshold. */
private boolean isLowImpact(VespaChangeRequest changeRequest) {
    Impact impact = changeRequest.getImpact();
    return impact != Impact.HIGH && impact != Impact.VERY_HIGH;
}
/** Returns whether the zone can absorb losing the given hosts: trivially true with no tenant hosts, otherwise asks the node repo. */
private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) {
    var tenantHosts = nodes.stream()
                           .filter(node -> node.type() == NodeType.host)
                           .map(Node::hostname)
                           .collect(Collectors.toList());
    if (tenantHosts.isEmpty()) return true;
    return nodeRepository.isReplaceable(zoneId, tenantHosts);
}
/**
 * Marks (or unmarks) the given node for retirement in the node repository.
 * The final {@code false} argument presumably disables deprovisioning — confirm against the NodeRepository API.
 */
private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) {
    nodeRepository.retire(zoneId, node.hostname().value(), wantToRetire, false);
}
/**
 * Approves the change request upstream, but only in the main system, and only when
 * approval has been requested and no operator action is required.
 */
private void approveChangeRequest(VespaChangeRequest changeRequest) {
    boolean approvable = system.equals(SystemName.main)
            && changeRequest.getStatus() != Status.REQUIRES_OPERATOR_ACTION
            && changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED;
    if (!approvable) return;
    LOG.info("Approving " + changeRequest.getChangeRequestSource().getId());
    changeRequestClient.approveChangeRequest(changeRequest);
}
/** Removes this change request from the node's VCMR report, writing back only if something was removed. */
private void removeReport(ZoneId zoneId, VespaChangeRequest changeRequest, Node node) {
    var vcmrReport = VcmrReport.fromReports(node.reports());
    boolean removed = vcmrReport.removeVcmr(changeRequest.getChangeRequestSource().getId());
    if (removed) updateReport(zoneId, node, vcmrReport);
}
/** Adds this change request to the node's VCMR report, writing back only if it was not already present. */
private void addReport(ZoneId zoneId, VespaChangeRequest changeRequest, Node node) {
    var source = changeRequest.getChangeRequestSource();
    var vcmrReport = VcmrReport.fromReports(node.reports());
    boolean added = vcmrReport.addVcmr(source.getId(), source.getPlannedStartTime(), source.getPlannedEndTime());
    if (added) updateReport(zoneId, node, vcmrReport);
}
/** Persists the given VCMR report on the node in the node repository. */
private void updateReport(ZoneId zoneId, Node node, VcmrReport report) {
    LOG.fine(() -> Text.format("Updating report for %s: %s", node.hostname(), report));
    nodeRepository.updateReports(zoneId, node.hostname().value(), report.toNodeReports());
}
/**
 * Returns the time retirement should begin: DAYS_TO_RETIRE week days (Mon-Fri)
 * before the planned start of the change request.
 */
protected ZonedDateTime getRetirementStartTime(ZonedDateTime plannedStartTime) {
    ZonedDateTime retirementStart = plannedStartTime;
    for (int weekdaysFound = 0; weekdaysFound < DAYS_TO_RETIRE; ) {
        retirementStart = retirementStart.minusDays(1);
        // ISO day-of-week values 1-5 are Monday through Friday.
        if (retirementStart.getDayOfWeek().getValue() < 6) weekdaysFound++;
    }
    return retirementStart;
}
} |
Won't this allow allocation of new nodes on a host we're trying to fail? Maybe we'll just have to live with that. | private boolean failActive(FailingNode failing) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
List<FailingNode> activeChildrenToFail = new ArrayList<>();
try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
return false;
if (lock.node().state() == Node.State.failed)
return true;
if (!Objects.equals(failing.node().state(), lock.node().state()))
return false;
failing = new FailingNode(lock.node(), failing.reason);
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) {
if (failingTenantNode.state() == Node.State.active) {
activeChildrenToFail.add(new FailingNode(failingTenantNode, reasonForChildFailure));
} else if (failingTenantNode.state() != Node.State.failed) {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (activeChildrenToFail.isEmpty()) {
wantToFail(failing.node(), true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(failing.node().hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
" for " + failing.reason() + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
activeChildrenToFail.forEach(this::failActive);
return false;
} | wantToFail(failing.node(), true, lock); | private boolean failActive(FailingNode failing) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
List<FailingNode> activeChildrenToFail = new ArrayList<>();
try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
return false;
if (lock.node().state() == Node.State.failed)
return true;
if (!Objects.equals(failing.node().state(), lock.node().state()))
return false;
failing = new FailingNode(lock.node(), failing.reason);
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) {
if (failingTenantNode.state() == Node.State.active) {
activeChildrenToFail.add(new FailingNode(failingTenantNode, reasonForChildFailure));
} else if (failingTenantNode.state() != Node.State.failed) {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (activeChildrenToFail.isEmpty()) {
wantToFail(failing.node(), true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(failing.node().hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
" for " + failing.reason() + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
activeChildrenToFail.forEach(this::failActive);
return false;
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Duration suspendedDownTimeLimit;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4);
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
    // Skip the run entirely while the node repository is not in a working state.
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;
    int attempts = 0;
    int failures = 0;
    int throttledHostFailures = 0;
    int throttledNodeFailures = 0;
    // Pass 1: ready nodes are failed directly, under the lock guarding unallocated nodes.
    try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
        for (FailingNode failing : findReadyFailingNodes()) {
            attempts++;
            if (throttle(failing.node())) {
                // A throttled candidate counts as a failure of this maintenance attempt.
                failures++;
                if (failing.node().type().isHost())
                    throttledHostFailures++;
                else
                    throttledNodeFailures++;
                continue;
            }
            nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason());
        }
    }
    // Pass 2: active nodes go through failActive, which redeploys the owning application.
    for (FailingNode failing : findActiveFailingNodes()) {
        attempts++;
        if (!failAllowedFor(failing.node().type())) continue;
        if (throttle(failing.node())) {
            failures++;
            if (failing.node().type().isHost())
                throttledHostFailures++;
            else
                throttledNodeFailures++;
            continue;
        }
        failActive(failing);
    }
    // Report throttling as a 0/1 gauge plus separate host/node counters.
    int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
    metric.set(throttlingActiveMetric, throttlingActive, null);
    metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
    return asSuccessFactor(attempts, failures);
}
/** Finds ready nodes whose own host (or parent host) has failure reports that warrant failing. */
private Collection<FailingNode> findReadyFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
        List<String> failureReports = reasonsToFailHost(hostNode);
        if (failureReports.isEmpty()) continue;
        String reason = hostNode.equals(node)
                ? "Host has failure reports: " + failureReports
                : "Parent (" + hostNode + ") has failure reports: " + failureReports;
        failingNodes.add(new FailingNode(node, reason));
    }
    return failingNodes;
}
/**
 * Finds active nodes that should be failed, from three sources:
 * 1) hosts already flagged as failing with no tenant nodes left,
 * 2) nodes that have been down longer than the (possibly suspension-extended) grace period,
 * 3) fully suspended nodes whose host carries failure reports.
 */
private Collection<FailingNode> findActiveFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
    for (Node host : activeNodes.hosts().failing())
        failingNodes.add(new FailingNode(host, "Host should be failed and have no tenant nodes"));
    for (Node node : activeNodes) {
        // Suspended nodes are expected to be down, so they get the longer grace period.
        Instant graceTimeStart = clock().instant().minus(nodeRepository().nodes().suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
        if (node.isDown() && node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
            // A recent activation resets the clock: only fail if the node has not been activated within the window.
            if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
                failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
        }
    }
    for (Node node : activeNodes) {
        if (allSuspended(node, activeNodes)) {
            Node host = node.parentHostname().flatMap(parent -> activeNodes.node(parent)).orElse(node);
            if (host.type().isHost()) {
                List<String> failureReports = reasonsToFailHost(host);
                if ( ! failureReports.isEmpty()) {
                    failingNodes.add(new FailingNode(node, host.equals(node) ?
                                                           "Host has failure reports: " + failureReports :
                                                           "Parent " + host + " has failure reports: " + failureReports));
                }
            }
        }
    }
    return failingNodes;
}
/** Returns a human-readable line for each report on the host that indicates the host should be failed. */
public static List<String> reasonsToFailHost(Node host) {
    List<String> reasons = new ArrayList<>();
    for (var report : host.reports().getReports()) {
        if (report.getType().hostShouldBeFailed())
            reasons.add(report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription());
    }
    return reasons;
}
/** Returns whether node has any kind of hardware issue, judged by its own (or its parent host's) failure reports. */
static boolean hasHardwareIssue(Node node, NodeList allNodes) {
    Node host = node.parentHostname().flatMap(allNodes::node).orElse(node);
    return !reasonsToFailHost(host).isEmpty();
}
/**
 * Returns whether the application owning this node is suspended in the orchestrator.
 * An unknown application is treated as not suspended.
 */
private boolean applicationSuspended(Node node) {
    try {
        return nodeRepository().orchestrator().getApplicationInstanceStatus(node.allocation().get().owner())
               == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
    } catch (ApplicationIdNotFoundException e) {
        // Application is gone from the orchestrator; deliberately treated as not suspended.
        return false;
    }
}
/** Is the node and all active children suspended? */
private boolean allSuspended(Node node, NodeList activeNodes) {
    if ( ! nodeRepository().nodes().suspended(node)) return false;
    // A child node has no children of its own, so its suspension alone suffices.
    if (node.parentHostname().isPresent()) return true;
    return activeNodes.childrenOf(node.hostname())
                      .stream()
                      .allMatch(child -> nodeRepository().nodes().suspended(child));
}
/**
 * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
 * unless the node is replaced.
 * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
 * But we refuse to fail out config(host)/controller(host).
 */
private boolean failAllowedFor(NodeType nodeType) {
    if (nodeType == NodeType.tenant || nodeType == NodeType.host)
        return true;
    if (nodeType == NodeType.proxy || nodeType == NodeType.proxyhost)
        // Allow at most one failed proxy(host) at a time.
        return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
    return false;
}
/**
 * Sets or clears the wantToFail flag on the node, writing only when the stored
 * flag actually differs from the requested value.
 *
 * <p>Bug fix: the previous guard ({@code if (!node.status().wantToFail())}) only ever
 * allowed writes when the flag was unset, so clearing the flag (wantToFail == false)
 * was a no-op precisely when the flag was set — the rollback in failActive's error
 * path could therefore never undo a failed attempt. Comparing against the requested
 * value preserves the "skip redundant writes" intent while making clearing work.
 */
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
    if (node.status().wantToFail() != wantToFail)
        nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/**
 * Returns true if node failing should be throttled.
 * Throttling engages once the nodes failed (or wanting to fail) within the policy's
 * time window reach the policy's allowed fraction of all nodes.
 */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;
    Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
    NodeList allNodes = nodeRepository().nodes().list();
    // "Recently failed" includes nodes still marked wantToFail, regardless of when.
    NodeList recentlyFailedNodes = allNodes
            .matching(n -> n.status().wantToFail() ||
                           (n.state() == Node.State.failed &&
                            n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)));
    if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
    // For hosts (no parent), also allow failing up to the policy minimum counted per distinct host.
    if (node.parentHostname().isEmpty()) {
        Set<String> parentsOfRecentlyFailedNodes = recentlyFailedNodes.stream()
                                                                      .map(n -> n.parentHostname().orElse(n.hostname()))
                                                                      .collect(Collectors.toSet());
        // If this host is already among the failed parents it adds nothing new; otherwise count it as one more.
        long potentiallyFailed = parentsOfRecentlyFailedNodes.contains(node.hostname()) ?
                                 parentsOfRecentlyFailedNodes.size() :
                                 parentsOfRecentlyFailedNodes.size() + 1;
        if (potentiallyFailed <= throttlePolicy.minimumAllowedToFail) return false;
    }
    // Never throttle a child whose parent host was recently failed — it must follow its parent.
    if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
    log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                           throttlePolicy.toHumanReadableString(allNodes.size())));
    return true;
}
/** Policy controlling how many nodes may be failed within a time window. */
public enum ThrottlePolicy {

    /** Hosted default: at most max(3% of all nodes, 2) failures per day. */
    hosted(Duration.ofDays(1), 0.03, 2),
    /** No throttling at all. */
    disabled(Duration.ZERO, 0, 0);

    /** Window within which failures are counted. */
    private final Duration throttleWindow;
    /** Fraction of the total node count that may fail within the window. */
    private final double fractionAllowedToFail;
    /** Lower bound on the number of failures allowed, regardless of fraction. */
    private final int minimumAllowedToFail;

    ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
        this.throttleWindow = throttleWindow;
        this.fractionAllowedToFail = fractionAllowedToFail;
        this.minimumAllowedToFail = minimumAllowedToFail;
    }

    /** Returns how many of totalNodes may fail within the window: the larger of the fraction and the minimum. */
    public int allowedToFailOf(int totalNodes) {
        double allowedByFraction = totalNodes * fractionAllowedToFail;
        return (int) Math.max(allowedByFraction, minimumAllowedToFail);
    }

    /** Renders the policy and its current absolute limit for log messages. */
    public String toHumanReadableString(int totalNodes) {
        return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
                             allowedToFailOf(totalNodes),
                             minimumAllowedToFail, throttleWindow);
    }

}
/**
 * A node to fail together with the reason for failing it.
 * Identity (equals/hashCode) is deliberately based on the node only, so a node
 * is not collected twice with different reasons when gathered into a Set.
 */
private static class FailingNode {

    private final Node node;
    private final String reason;

    public FailingNode(Node node, String reason) {
        this.node = node;
        this.reason = reason;
    }

    public Node node() { return node; }

    public String reason() { return reason; }

    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if ( ! (other instanceof FailingNode)) return false;
        return node.equals(((FailingNode) other).node);
    }

    @Override
    public int hashCode() {
        return node.hashCode();
    }

}
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Duration suspendedDownTimeLimit;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4);
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (FailingNode failing : findReadyFailingNodes()) {
attempts++;
if (throttle(failing.node())) {
failures++;
if (failing.node().type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason());
}
}
for (FailingNode failing : findActiveFailingNodes()) {
attempts++;
if (!failAllowedFor(failing.node().type())) continue;
if (throttle(failing.node())) {
failures++;
if (failing.node().type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
failActive(failing);
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Collection<FailingNode> findReadyFailingNodes() {
Set<FailingNode> failingNodes = new HashSet<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
failingNodes.add(new FailingNode(node, "Host has failure reports: " + failureReports));
} else {
failingNodes.add(new FailingNode(node, "Parent (" + hostNode + ") has failure reports: " + failureReports));
}
}
}
return failingNodes;
}
private Collection<FailingNode> findActiveFailingNodes() {
Set<FailingNode> failingNodes = new HashSet<>();
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing())
failingNodes.add(new FailingNode(host, "Host should be failed and have no tenant nodes"));
for (Node node : activeNodes) {
Instant graceTimeStart = clock().instant().minus(nodeRepository().nodes().suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
if (node.isDown() && node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
}
}
for (Node node : activeNodes) {
if (allSuspended(node, activeNodes)) {
Node host = node.parentHostname().flatMap(parent -> activeNodes.node(parent)).orElse(node);
if (host.type().isHost()) {
List<String> failureReports = reasonsToFailHost(host);
if ( ! failureReports.isEmpty()) {
failingNodes.add(new FailingNode(node, host.equals(node) ?
"Host has failure reports: " + failureReports :
"Parent " + host + " has failure reports: " + failureReports));
}
}
}
}
return failingNodes;
}
public static List<String> reasonsToFailHost(Node host) {
return host.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeList allNodes) {
Node host = node.parentHostname().flatMap(parent -> allNodes.node(parent)).orElse(node);
return reasonsToFailHost(host).size() > 0;
}
private boolean applicationSuspended(Node node) {
try {
return nodeRepository().orchestrator().getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean allSuspended(Node node, NodeList activeNodes) {
if (!nodeRepository().nodes().suspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.childrenOf(node.hostname()).stream().allMatch(nodeRepository().nodes()::suspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
if (!node.status().wantToFail())
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes
.matching(n -> n.status().wantToFail() ||
(n.state() == Node.State.failed &&
n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty()) {
Set<String> parentsOfRecentlyFailedNodes = recentlyFailedNodes.stream()
.map(n -> n.parentHostname().orElse(n.hostname()))
.collect(Collectors.toSet());
long potentiallyFailed = parentsOfRecentlyFailedNodes.contains(node.hostname()) ?
parentsOfRecentlyFailedNodes.size() :
parentsOfRecentlyFailedNodes.size() + 1;
if (potentiallyFailed <= throttlePolicy.minimumAllowedToFail) return false;
}
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
}
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.03, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
private static class FailingNode {
private final Node node;
private final String reason;
public FailingNode(Node node, String reason) {
this.node = node;
this.reason = reason;
}
public Node node() { return node; }
public String reason() { return reason; }
@Override
public boolean equals(Object other) {
if ( ! (other instanceof FailingNode)) return false;
return ((FailingNode)other).node().equals(this.node());
}
@Override
public int hashCode() {
return node.hashCode();
}
}
} |
Yes, I think we will have to live with that. We could invent some other marker to prevent allocation and ensure that stays in sync with the intention of the NodeFailer, but that's a bigger fix. | private boolean failActive(FailingNode failing) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
List<FailingNode> activeChildrenToFail = new ArrayList<>();
try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
return false;
if (lock.node().state() == Node.State.failed)
return true;
if (!Objects.equals(failing.node().state(), lock.node().state()))
return false;
failing = new FailingNode(lock.node(), failing.reason);
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) {
if (failingTenantNode.state() == Node.State.active) {
activeChildrenToFail.add(new FailingNode(failingTenantNode, reasonForChildFailure));
} else if (failingTenantNode.state() != Node.State.failed) {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (activeChildrenToFail.isEmpty()) {
wantToFail(failing.node(), true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(failing.node().hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
" for " + failing.reason() + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
activeChildrenToFail.forEach(this::failActive);
return false;
} | wantToFail(failing.node(), true, lock); | private boolean failActive(FailingNode failing) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
List<FailingNode> activeChildrenToFail = new ArrayList<>();
try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
return false;
if (lock.node().state() == Node.State.failed)
return true;
if (!Objects.equals(failing.node().state(), lock.node().state()))
return false;
failing = new FailingNode(lock.node(), failing.reason);
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) {
if (failingTenantNode.state() == Node.State.active) {
activeChildrenToFail.add(new FailingNode(failingTenantNode, reasonForChildFailure));
} else if (failingTenantNode.state() != Node.State.failed) {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (activeChildrenToFail.isEmpty()) {
wantToFail(failing.node(), true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(failing.node().hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
" for " + failing.reason() + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
activeChildrenToFail.forEach(this::failActive);
return false;
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Duration suspendedDownTimeLimit;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4);
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
    // Bail out if the node repository is not in a working state
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;
    int attempts = 0;
    int failures = 0; // attempts blocked by throttling; counted against the success factor
    int throttledHostFailures = 0;
    int throttledNodeFailures = 0;
    // Ready nodes are failed directly, under the unallocated-nodes lock
    try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
        for (FailingNode failing : findReadyFailingNodes()) {
            attempts++;
            if (throttle(failing.node())) {
                failures++;
                if (failing.node().type().isHost())
                    throttledHostFailures++;
                else
                    throttledNodeFailures++;
                continue;
            }
            nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason());
        }
    }
    // Active nodes go through failActive (defined elsewhere in this class), which may redeploy
    for (FailingNode failing : findActiveFailingNodes()) {
        attempts++;
        if (!failAllowedFor(failing.node().type())) continue;
        if (throttle(failing.node())) {
            failures++;
            if (failing.node().type().isHost())
                throttledHostFailures++;
            else
                throttledNodeFailures++;
            continue;
        }
        failActive(failing);
    }
    // 1 if any failure was throttled this run, 0 otherwise
    int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
    metric.set(throttlingActiveMetric, throttlingActive, null);
    metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
    return asSuccessFactor(attempts, failures);
}
/**
 * Finds ready nodes whose host (or the node itself, if it is a host) has failure reports.
 *
 * @return the set of ready nodes that should be failed, with a human-readable reason
 */
private Collection<FailingNode> findReadyFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
        // Failure reports live on the parent host; fall back to the node itself when it has no parent
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
        List<String> failureReports = reasonsToFailHost(hostNode);
        if ( ! failureReports.isEmpty()) {
            failingNodes.add(new FailingNode(node, hostNode.equals(node)
                    ? "Host has failure reports: " + failureReports
                    : "Parent (" + hostNode + ") has failure reports: " + failureReports));
        }
    }
    return failingNodes;
}
/** Finds active nodes that should be failed: failing hosts, long-down nodes, and suspended nodes on bad hosts. */
private Collection<FailingNode> findActiveFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
    // 1) Hosts explicitly marked as failing with no tenant nodes left on them
    for (Node host : activeNodes.hosts().failing())
        failingNodes.add(new FailingNode(host, "Host should be failed and have no tenant nodes"));
    // 2) Nodes down longer than the grace period (4x longer for suspended nodes)
    for (Node node : activeNodes) {
        Instant graceTimeStart = clock().instant().minus(nodeRepository().nodes().suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
        if (node.isDown() && node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
            // Ignore nodes (re)activated after the grace window started; they get a fresh grace period
            // NOTE(review): the message always states downTimeLimit, even when suspendedDownTimeLimit applied — confirm intended
            if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
                failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
        }
    }
    // 3) Fully suspended nodes whose host has failure reports
    for (Node node : activeNodes) {
        if (allSuspended(node, activeNodes)) {
            Node host = node.parentHostname().flatMap(parent -> activeNodes.node(parent)).orElse(node);
            if (host.type().isHost()) {
                List<String> failureReports = reasonsToFailHost(host);
                if ( ! failureReports.isEmpty()) {
                    failingNodes.add(new FailingNode(node, host.equals(node) ?
                                                           "Host has failure reports: " + failureReports :
                                                           "Parent " + host + " has failure reports: " + failureReports));
                }
            }
        }
    }
    return failingNodes;
}
/**
 * Returns one human-readable description per report indicating this host should be failed.
 * The returned list is unmodifiable; all callers in this file only read it.
 */
public static List<String> reasonsToFailHost(Node host) {
    return host.reports().getReports().stream()
               .filter(report -> report.getType().hostShouldBeFailed())
               .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
               .toList(); // Java 16+ idiom, already used elsewhere in this file
}
/** Returns whether the node (or its parent host, when it has one) has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeList allNodes) {
    Node host = node.parentHostname().flatMap(parent -> allNodes.node(parent)).orElse(node);
    return ! reasonsToFailHost(host).isEmpty();
}
/**
 * Returns whether the orchestrator allows this node's owning application to be down.
 * Assumes the node is allocated (active) — allocation().get() is called without a presence check.
 */
private boolean applicationSuspended(Node node) {
    try {
        return nodeRepository().orchestrator().getApplicationInstanceStatus(node.allocation().get().owner())
               == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
    } catch (ApplicationIdNotFoundException e) {
        // Application is unknown to the orchestrator — treat as not suspended
        return false;
    }
}
/** Is the node and all active children suspended? */
private boolean allSuspended(Node node, NodeList activeNodes) {
    if (!nodeRepository().nodes().suspended(node)) return false;
    if (node.parentHostname().isPresent()) return true; // a child node has no children of its own
    return activeNodes.childrenOf(node.hostname()).stream().allMatch(nodeRepository().nodes()::suspended);
}
/**
 * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
 * unless the node is replaced.
 * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
 * But we refuse to fail out config(host)/controller(host)
 */
private boolean failAllowedFor(NodeType nodeType) {
    return switch (nodeType) {
        case tenant, host -> true;
        // Only one proxy (host) may be failed at a time
        case proxy, proxyhost -> nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
        default -> false;
    };
}
/**
 * Records the want-to-fail flag on the node; the actual failing is performed later by other machinery.
 *
 * NOTE(review): the previous javadoc here described failActive() and declared
 * "@return whether node was successfully failed" on this void method — it appears left over
 * from a refactoring. Also note the guard only checks the node's current flag, so calling
 * this with wantToFail=false on a node not wanting to fail still performs a write — confirm intended.
 */
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
    if (!node.status().wantToFail())
        nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;
    Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
    NodeList allNodes = nodeRepository().nodes().list();
    // Nodes currently marked want-to-fail (regardless of age), plus nodes failed within the window
    NodeList recentlyFailedNodes = allNodes
            .matching(n -> n.status().wantToFail() ||
                           (n.state() == Node.State.failed &&
                            n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)));
    // Under the policy's fraction-of-all-nodes budget: no throttling
    if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
    // For hosts (no parent): always allow failing up to minimumAllowedToFail distinct hosts
    if (node.parentHostname().isEmpty()) {
        Set<String> parentsOfRecentlyFailedNodes = recentlyFailedNodes.stream()
                                                                      .map(n -> n.parentHostname().orElse(n.hostname()))
                                                                      .collect(Collectors.toSet());
        // Count this host only if it is not already among the recently failed hosts
        long potentiallyFailed = parentsOfRecentlyFailedNodes.contains(node.hostname()) ?
                                 parentsOfRecentlyFailedNodes.size() :
                                 parentsOfRecentlyFailedNodes.size() + 1;
        if (potentiallyFailed <= throttlePolicy.minimumAllowedToFail) return false;
    }
    // Never throttle children of a recently failed host
    if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
    log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                           throttlePolicy.toHumanReadableString(allNodes.size())));
    return true;
}
/** Policy capping how many nodes may be failed within a sliding time window. */
public enum ThrottlePolicy {

    // window, fraction of all nodes allowed to fail, minimum count always allowed
    hosted(Duration.ofDays(1), 0.03, 2),
    disabled(Duration.ZERO, 0, 0);

    private final Duration throttleWindow;
    private final double fractionAllowedToFail;
    private final int minimumAllowedToFail;

    ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
        this.throttleWindow = throttleWindow;
        this.fractionAllowedToFail = fractionAllowedToFail;
        this.minimumAllowedToFail = minimumAllowedToFail;
    }

    /** The number of nodes allowed to fail in the window, given the total node count (never below the minimum). */
    public int allowedToFailOf(int totalNodes) {
        return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
    }

    /** Human-readable description of this policy, for log messages. */
    public String toHumanReadableString(int totalNodes) {
        return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
                             allowedToFailOf(totalNodes),
                             minimumAllowedToFail, throttleWindow);
    }
}
/** A node that should be failed, together with a human-readable reason. Identity is the node only. */
private static class FailingNode {

    private final Node node;
    private final String reason;

    public FailingNode(Node node, String reason) {
        this.node = node;
        this.reason = reason;
    }

    public Node node() { return node; }

    public String reason() { return reason; }

    @Override
    public boolean equals(Object other) {
        // Pattern-matching instanceof (Java 16+, already used in this file) replaces the explicit cast.
        // The reason is deliberately excluded: two FailingNodes for the same node are the same entry.
        return other instanceof FailingNode that && that.node().equals(this.node());
    }

    @Override
    public int hashCode() {
        return node.hashCode(); // consistent with equals: based on node only
    }

}
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Duration suspendedDownTimeLimit;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4);
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (FailingNode failing : findReadyFailingNodes()) {
attempts++;
if (throttle(failing.node())) {
failures++;
if (failing.node().type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason());
}
}
for (FailingNode failing : findActiveFailingNodes()) {
attempts++;
if (!failAllowedFor(failing.node().type())) continue;
if (throttle(failing.node())) {
failures++;
if (failing.node().type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
failActive(failing);
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Collection<FailingNode> findReadyFailingNodes() {
Set<FailingNode> failingNodes = new HashSet<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
failingNodes.add(new FailingNode(node, "Host has failure reports: " + failureReports));
} else {
failingNodes.add(new FailingNode(node, "Parent (" + hostNode + ") has failure reports: " + failureReports));
}
}
}
return failingNodes;
}
private Collection<FailingNode> findActiveFailingNodes() {
Set<FailingNode> failingNodes = new HashSet<>();
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing())
failingNodes.add(new FailingNode(host, "Host should be failed and have no tenant nodes"));
for (Node node : activeNodes) {
Instant graceTimeStart = clock().instant().minus(nodeRepository().nodes().suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
if (node.isDown() && node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
}
}
for (Node node : activeNodes) {
if (allSuspended(node, activeNodes)) {
Node host = node.parentHostname().flatMap(parent -> activeNodes.node(parent)).orElse(node);
if (host.type().isHost()) {
List<String> failureReports = reasonsToFailHost(host);
if ( ! failureReports.isEmpty()) {
failingNodes.add(new FailingNode(node, host.equals(node) ?
"Host has failure reports: " + failureReports :
"Parent " + host + " has failure reports: " + failureReports));
}
}
}
}
return failingNodes;
}
public static List<String> reasonsToFailHost(Node host) {
return host.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeList allNodes) {
Node host = node.parentHostname().flatMap(parent -> allNodes.node(parent)).orElse(node);
return reasonsToFailHost(host).size() > 0;
}
private boolean applicationSuspended(Node node) {
try {
return nodeRepository().orchestrator().getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean allSuspended(Node node, NodeList activeNodes) {
if (!nodeRepository().nodes().suspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.childrenOf(node.hostname()).stream().allMatch(nodeRepository().nodes()::suspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
if (!node.status().wantToFail())
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes
.matching(n -> n.status().wantToFail() ||
(n.state() == Node.State.failed &&
n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty()) {
Set<String> parentsOfRecentlyFailedNodes = recentlyFailedNodes.stream()
.map(n -> n.parentHostname().orElse(n.hostname()))
.collect(Collectors.toSet());
long potentiallyFailed = parentsOfRecentlyFailedNodes.contains(node.hostname()) ?
parentsOfRecentlyFailedNodes.size() :
parentsOfRecentlyFailedNodes.size() + 1;
if (potentiallyFailed <= throttlePolicy.minimumAllowedToFail) return false;
}
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
}
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.03, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
private static class FailingNode {
private final Node node;
private final String reason;
public FailingNode(Node node, String reason) {
this.node = node;
this.reason = reason;
}
public Node node() { return node; }
public String reason() { return reason; }
@Override
public boolean equals(Object other) {
if ( ! (other instanceof FailingNode)) return false;
return ((FailingNode)other).node().equals(this.node());
}
@Override
public int hashCode() {
return node.hashCode();
}
}
} |
Consider whether we also want to expose the peer certificate information in `ConnectionAuthContext` even when no authz rules are configured and the peer is implicitly granted all capabilities | public ConnectionAuthContext authorizePeer(List<X509Certificate> certChain) {
if (authorizedPeers.isEmpty()) return ConnectionAuthContext.defaultAllCapabilities();
X509Certificate cert = certChain.get(0);
Set<String> matchedPolicies = new HashSet<>();
Set<CapabilitySet> grantedCapabilities = new HashSet<>();
String cn = getCommonName(cert).orElse(null);
List<String> sans = getSubjectAlternativeNames(cert);
log.fine(() -> String.format("Subject info from x509 certificate: CN=[%s], 'SAN=%s", cn, sans));
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
matchedPolicies.add(peerPolicy.policyName());
grantedCapabilities.add(peerPolicy.capabilities());
}
}
return new ConnectionAuthContext(certChain, CapabilitySet.unionOf(grantedCapabilities), matchedPolicies);
} | if (authorizedPeers.isEmpty()) return ConnectionAuthContext.defaultAllCapabilities(); | public ConnectionAuthContext authorizePeer(List<X509Certificate> certChain) {
if (authorizedPeers.isEmpty()) return ConnectionAuthContext.defaultAllCapabilities(certChain);
X509Certificate cert = certChain.get(0);
Set<String> matchedPolicies = new HashSet<>();
Set<CapabilitySet> grantedCapabilities = new HashSet<>();
String cn = getCommonName(cert).orElse(null);
List<String> sans = getSubjectAlternativeNames(cert);
log.fine(() -> String.format("Subject info from x509 certificate: CN=[%s], 'SAN=%s", cn, sans));
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
matchedPolicies.add(peerPolicy.policyName());
grantedCapabilities.add(peerPolicy.capabilities());
}
}
return new ConnectionAuthContext(certChain, CapabilitySet.unionOf(grantedCapabilities), matchedPolicies);
} | class PeerAuthorizer {
private static final Logger log = Logger.getLogger(PeerAuthorizer.class.getName());
private final AuthorizedPeers authorizedPeers;
/** @param authorizedPeers the peer policies to authorize against; an empty set grants all capabilities */
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
    this.authorizedPeers = authorizedPeers;
}
/** Convenience overload for a chain consisting of a single certificate. */
public ConnectionAuthContext authorizePeer(X509Certificate cert) { return authorizePeer(List.of(cert)); }
/**
 * Returns whether every required credential of the policy is satisfied by the peer's CN/SANs.
 * Note: a policy with no required credentials matches any peer (allMatch on an empty stream is true).
 */
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
    return peerPolicy.requiredCredentials().stream()
            .allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
/** Returns whether a single required credential is satisfied by the peer's subject information. */
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
    return switch (requiredCredential.field()) {
        case CN -> cn != null && requiredCredential.pattern().matches(cn);
        case SAN_DNS, SAN_URI -> sans.stream().anyMatch(san -> requiredCredential.pattern().matches(san));
        default -> throw new RuntimeException("Unknown field: " + requiredCredential.field());
    };
}
/** Returns the first subject common name of the certificate, if it has one. */
private static Optional<String> getCommonName(X509Certificate peerCertificate) {
    return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
            .findFirst();
}
/** Returns the DNS, IP and URI subject alternative name values of the certificate; other SAN types are ignored. */
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
    return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
            .filter(san -> san.getType() == DNS || san.getType() == IP || san.getType() == URI)
            .map(SubjectAlternativeName::getValue)
            .collect(toList());
}
} | class PeerAuthorizer {
private static final Logger log = Logger.getLogger(PeerAuthorizer.class.getName());
private final AuthorizedPeers authorizedPeers;
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
this.authorizedPeers = authorizedPeers;
}
public ConnectionAuthContext authorizePeer(X509Certificate cert) { return authorizePeer(List.of(cert)); }
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
return peerPolicy.requiredCredentials().stream()
.allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
switch (requiredCredential.field()) {
case CN:
return cn != null && requiredCredential.pattern().matches(cn);
case SAN_DNS:
case SAN_URI:
return sans.stream()
.anyMatch(san -> requiredCredential.pattern().matches(san));
default:
throw new RuntimeException("Unknown field: " + requiredCredential.field());
}
}
private static Optional<String> getCommonName(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
.findFirst();
}
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
.filter(san -> san.getType() == DNS || san.getType() == IP || san.getType() == URI)
.map(SubjectAlternativeName::getValue)
.collect(toList());
}
} |
Perhaps randomise the order, lest a single failing deployment block all others? | private void deployRefreshedCertificates() {
var now = clock.instant();
var jobsTriggered = new AtomicInteger(0);
curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) ->
endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> {
Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) {
controller().applications().getInstance(applicationId)
.ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) {
JobType job = JobType.deploymentTo(zone);
deploymentTrigger.reTrigger(applicationId, job, "re-triggered by EndpointCertificateMaintainer");
log.info("Re-triggering deployment job " + job.jobName() + " for instance " +
applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
}
}));
}
}));
} | if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) { | private void deployRefreshedCertificates() {
var now = clock.instant();
var eligibleJobs = new ArrayList<EligibleJob>();
curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) ->
endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> {
Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) {
controller().applications().getInstance(applicationId)
.ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime)) {
JobType job = JobType.deploymentTo(zone);
eligibleJobs.add(new EligibleJob(deployment, applicationId, job));
}
}));
}
}));
eligibleJobs.stream()
.min(oldestFirst)
.ifPresent(e -> {
deploymentTrigger.reTrigger(e.applicationId, e.job, "re-triggered by EndpointCertificateMaintainer");
log.info("Re-triggering deployment job " + e.job.jobName() + " for instance " +
e.applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
});
} | class EndpointCertificateMaintainer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());
private final DeploymentTrigger deploymentTrigger;
private final Clock clock;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
@Inject
public EndpointCertificateMaintainer(Controller controller, Duration interval) {
    super(controller, interval);
    this.deploymentTrigger = controller.applications().deploymentTrigger();
    this.clock = controller.clock();
    this.secretStore = controller.secretStore();
    // NOTE(review): mixes the parameter (controller.) and the accessor (controller().) — presumably the same object; confirm
    this.curator = controller().curator();
    this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
}
@Override
protected double maintain() {
    try {
        deployRefreshedCertificates();          // roll out refreshed certs by re-triggering deployments
        updateRefreshedCertificates();          // record newer secret-store versions in ZK
        deleteUnusedCertificates();             // clean up certs unused for a month
        deleteOrReportUnmanagedCertificates();  // reconcile provider state with our records
    } catch (Exception e) {
        log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e);
        return 0.0; // report the whole run as failed
    }
    return 1.0;
}
/** Records in ZK any certificate whose secret store holds a newer version than our stored metadata. */
private void updateRefreshedCertificates() {
    curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> {
        var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
        if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
            var refreshedCertificateMetadata = endpointCertificateMetadata
                    .withVersion(latestAvailableVersion.getAsInt())
                    .withLastRefreshed(clock.instant().getEpochSecond());
            try (Mutex lock = lock(applicationId)) {
                // Re-read under the lock and only write if nothing changed since we read (compare-and-set)
                if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                    curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata);
                }
            }
        }
    }));
}
/**
 * Documents deployRefreshedCertificates() (defined elsewhere in this class), not the method below:
 * if it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time).
 */
/** Returns the highest version present for BOTH the certificate and its key in the secret store, if any. */
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
    try {
        var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
        var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
        // Only versions where both the cert and the key exist are usable
        return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
    } catch (SecretNotFoundException s) {
        return OptionalInt.empty(); // cert or key entirely absent from the secret store
    }
}
/** Deletes certificates (from the provider and ZK) that have not been requested for a month and whose app has no deployments. */
private void deleteUnusedCertificates() {
    var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
    curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> {
        var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested());
        if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) {
            try (Mutex lock = lock(applicationId)) {
                // Compare-and-delete: only act if the metadata is unchanged since we read it
                if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                    log.log(Level.INFO, "Cert for app " + applicationId.serializedForm()
                            + " has not been requested in a month and app has no deployments, deleting from provider and ZK");
                    endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId());
                    curator.deleteEndpointCertificateMetadata(applicationId);
                }
            }
        }
    });
}
/** Acquires the curator lock for the application's tenant-and-application id. */
private Mutex lock(ApplicationId applicationId) {
    return curator.lock(TenantAndApplicationId.from(applicationId));
}
/** Returns whether the application instance has no deployments (or does not exist at all). */
private boolean hasNoDeployments(ApplicationId applicationId) {
    return controller().applications().getInstance(applicationId)
            .map(Instance::deployments)
            .orElseGet(Map::of)
            .isEmpty();
}
/**
 * Reconciles the provider's certificate list with our stored metadata:
 * a provider cert unknown to us is either adopted as a new leaf request for a matching app,
 * or deleted once it is more than a week old.
 */
private void deleteOrReportUnmanagedCertificates() {
    List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates();
    Map<ApplicationId, EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata();
    List<String> leafRequestIds = storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).collect(Collectors.toList());
    List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).collect(Collectors.toList());
    for (var providerCertificateMetadata : endpointCertificateMetadata) {
        // Only consider provider certs we have no record of, neither as root nor as leaf request
        if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) {
            EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId());
            boolean matchFound = false;
            for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) {
                ApplicationId storedApp = storedAppEntry.getKey();
                EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue();
                // Same cert key name as a stored app's cert: this is a refresh of that app's cert — adopt it as the leaf
                if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) {
                    matchFound = true;
                    try (Mutex lock = lock(storedApp)) {
                        // Compare-and-set under the lock before writing the new leaf request id
                        if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) {
                            log.log(Level.INFO, "Cert for app " + storedApp.serializedForm()
                                    + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK");
                            curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id())));
                        }
                        break;
                    }
                }
            }
            if (!matchFound) {
                // Grace period of a week before deleting a provider cert we cannot match to anything
                // NOTE(review): uses Instant.now() rather than the injected clock used elsewhere in this class — confirm intended
                if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) {
                    log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s",
                            providerCertificateMetadata.requestId(),
                            providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", "))));
                    endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId());
                }
            }
        }
    }
}
} | class EndpointCertificateMaintainer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());
private final DeploymentTrigger deploymentTrigger;
private final Clock clock;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at());
@Inject
public EndpointCertificateMaintainer(Controller controller, Duration interval) {
super(controller, interval);
this.deploymentTrigger = controller.applications().deploymentTrigger();
this.clock = controller.clock();
this.secretStore = controller.secretStore();
this.curator = controller().curator();
this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
}
@Override
protected double maintain() {
try {
deployRefreshedCertificates();
updateRefreshedCertificates();
deleteUnusedCertificates();
deleteOrReportUnmanagedCertificates();
} catch (Exception e) {
log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e);
return 0.0;
}
return 1.0;
}
private void updateRefreshedCertificates() {
curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = endpointCertificateMetadata
.withVersion(latestAvailableVersion.getAsInt())
.withLastRefreshed(clock.instant().getEpochSecond());
try (Mutex lock = lock(applicationId)) {
if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) {
curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata);
}
}
}
}));
}
record EligibleJob(Deployment deployment, ApplicationId applicationId, JobType job) {}
/**
* If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time).
*/
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
try {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
} catch (SecretNotFoundException s) {
return OptionalInt.empty();
}
}
/**
 * Deletes certificates for applications that have not requested one in the last
 * 30 days and currently have no deployments, both from the provider and from ZK.
 */
private void deleteUnusedCertificates() {
    var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
    curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> {
        var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested());
        if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) {
            try (Mutex lock = lock(applicationId)) {
                // Re-read under the lock: only delete if the stored metadata is unchanged
                // since we decided to delete (avoids racing a concurrent update).
                if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                    log.log(Level.INFO, "Cert for app " + applicationId.serializedForm()
                            + " has not been requested in a month and app has no deployments, deleting from provider and ZK");
                    endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId());
                    curator.deleteEndpointCertificateMetadata(applicationId);
                }
            }
        }
    });
}
/** Acquires the curator lock guarding the application's tenant-and-application id. */
private Mutex lock(ApplicationId applicationId) {
    return curator.lock(TenantAndApplicationId.from(applicationId));
}
/** Returns true if the application instance is missing or has no deployments. */
private boolean hasNoDeployments(ApplicationId applicationId) {
    return controller().applications().getInstance(applicationId)
            .map(instance -> instance.deployments().isEmpty())
            .orElse(true);
}
/**
 * Reconciles certificates known to the provider against the metadata stored in ZK.
 * For each provider certificate whose request id we do not track:
 * - if its key name matches a stored application's cert, record it as that
 *   application's new leafRequestId in ZK (the provider refreshed the cert);
 * - otherwise, if the certificate is more than 7 days old, delete it from the
 *   provider as unmaintained.
 */
private void deleteOrReportUnmanagedCertificates() {
    List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates();
    Map<ApplicationId, EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata();
    // Every request id we already track, either as a refreshed (leaf) or original (root) request.
    List<String> leafRequestIds = storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).toList();
    List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).toList();
    for (var providerCertificateMetadata : endpointCertificateMetadata) {
        if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) {
            EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId());
            boolean matchFound = false;
            for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) {
                ApplicationId storedApp = storedAppEntry.getKey();
                EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue();
                if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) {
                    matchFound = true;
                    try (Mutex lock = lock(storedApp)) {
                        // Re-read under the lock; only update if unchanged since we read it above.
                        if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) {
                            log.log(Level.INFO, "Cert for app " + storedApp.serializedForm()
                                    + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK");
                            curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id())));
                        }
                        break; // stop at the first matching stored application
                    }
                }
            }
            if (!matchFound) {
                // Grace period: skip certs younger than a week (they may be mid-provisioning).
                if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) {
                    log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s",
                            providerCertificateMetadata.requestId(),
                            providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", "))));
                    endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId());
                }
            }
        }
    }
}
} |
What does OFFSET_END mean? | private void getReadableProgress(Map<Integer, String> showPartitionIdToOffset) {
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
if (entry.getValue() == 0) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_ZERO);
} else if (entry.getValue() == -1) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_END);
} else if (entry.getValue() == -2) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_BEGINNING);
} else {
showPartitionIdToOffset.put(entry.getKey(), "" + (entry.getValue() - 1));
}
}
} | showPartitionIdToOffset.put(entry.getKey(), OFFSET_END); | private void getReadableProgress(Map<Integer, String> showPartitionIdToOffset) {
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
if (entry.getValue() == 0) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_ZERO);
} else if (entry.getValue() == -1) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_END);
} else if (entry.getValue() == -2) {
showPartitionIdToOffset.put(entry.getKey(), OFFSET_BEGINNING);
} else {
showPartitionIdToOffset.put(entry.getKey(), "" + (entry.getValue() - 1));
}
}
} | class KafkaProgress extends RoutineLoadProgress {
public static final String OFFSET_BEGINNING = "OFFSET_BEGINNING";
public static final String OFFSET_END = "OFFSET_END";
public static final String OFFSET_ZERO = "OFFSET_ZERO";
public static final long OFFSET_BEGINNING_VAL = -2;
public static final long OFFSET_END_VAL = -1;
private Map<Integer, Long> partitionIdToOffset = Maps.newConcurrentMap();
public KafkaProgress() {
super(LoadDataSourceType.KAFKA);
}
public KafkaProgress(TKafkaRLTaskProgress tKafkaRLTaskProgress) {
super(LoadDataSourceType.KAFKA);
this.partitionIdToOffset = tKafkaRLTaskProgress.getPartitionCmtOffset();
}
public Map<Integer, Long> getPartitionIdToOffset(List<Integer> partitionIds) {
Map<Integer, Long> result = Maps.newHashMap();
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
for (Integer partitionId : partitionIds) {
if (entry.getKey().equals(partitionId)) {
result.put(partitionId, entry.getValue());
}
}
}
return result;
}
public void addPartitionOffset(Pair<Integer, Long> partitionOffset) {
partitionIdToOffset.put(partitionOffset.first, partitionOffset.second);
}
public Long getOffsetByPartition(int kafkaPartition) {
return partitionIdToOffset.get(kafkaPartition);
}
public boolean containsPartition(Integer kafkaPartition) {
return partitionIdToOffset.containsKey(kafkaPartition);
}
public boolean hasPartition() {
return partitionIdToOffset.isEmpty();
}
@Override
public String toString() {
Map<Integer, String> showPartitionIdToOffset = Maps.newHashMap();
getReadableProgress(showPartitionIdToOffset);
return "KafkaProgress [partitionIdToOffset="
+ Joiner.on("|").withKeyValueSeparator("_").join(showPartitionIdToOffset) + "]";
}
@Override
public String toJsonString() {
Map<Integer, String> showPartitionIdToOffset = Maps.newHashMap();
getReadableProgress(showPartitionIdToOffset);
Gson gson = new Gson();
return gson.toJson(showPartitionIdToOffset);
}
@Override
public void update(RoutineLoadProgress progress) {
KafkaProgress newProgress = (KafkaProgress) progress;
newProgress.partitionIdToOffset.entrySet().stream()
.forEach(entity -> this.partitionIdToOffset.put(entity.getKey(), entity.getValue() + 1));
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeInt(partitionIdToOffset.size());
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
out.writeInt((Integer) entry.getKey());
out.writeLong((Long) entry.getValue());
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int size = in.readInt();
partitionIdToOffset = new HashMap<>();
for (int i = 0; i < size; i++) {
partitionIdToOffset.put(in.readInt(), in.readLong());
}
}
} | class KafkaProgress extends RoutineLoadProgress {
public static final String OFFSET_BEGINNING = "OFFSET_BEGINNING";
public static final String OFFSET_END = "OFFSET_END";
public static final String OFFSET_ZERO = "OFFSET_ZERO";
public static final long OFFSET_BEGINNING_VAL = -2;
public static final long OFFSET_END_VAL = -1;
private Map<Integer, Long> partitionIdToOffset = Maps.newConcurrentMap();
public KafkaProgress() {
super(LoadDataSourceType.KAFKA);
}
public KafkaProgress(TKafkaRLTaskProgress tKafkaRLTaskProgress) {
super(LoadDataSourceType.KAFKA);
this.partitionIdToOffset = tKafkaRLTaskProgress.getPartitionCmtOffset();
}
public Map<Integer, Long> getPartitionIdToOffset(List<Integer> partitionIds) {
Map<Integer, Long> result = Maps.newHashMap();
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
for (Integer partitionId : partitionIds) {
if (entry.getKey().equals(partitionId)) {
result.put(partitionId, entry.getValue());
}
}
}
return result;
}
public void addPartitionOffset(Pair<Integer, Long> partitionOffset) {
partitionIdToOffset.put(partitionOffset.first, partitionOffset.second);
}
public Long getOffsetByPartition(int kafkaPartition) {
return partitionIdToOffset.get(kafkaPartition);
}
public boolean containsPartition(Integer kafkaPartition) {
return partitionIdToOffset.containsKey(kafkaPartition);
}
public boolean hasPartition() {
return partitionIdToOffset.isEmpty();
}
@Override
public String toString() {
Map<Integer, String> showPartitionIdToOffset = Maps.newHashMap();
getReadableProgress(showPartitionIdToOffset);
return "KafkaProgress [partitionIdToOffset="
+ Joiner.on("|").withKeyValueSeparator("_").join(showPartitionIdToOffset) + "]";
}
@Override
public String toJsonString() {
Map<Integer, String> showPartitionIdToOffset = Maps.newHashMap();
getReadableProgress(showPartitionIdToOffset);
Gson gson = new Gson();
return gson.toJson(showPartitionIdToOffset);
}
@Override
public void update(RoutineLoadProgress progress) {
KafkaProgress newProgress = (KafkaProgress) progress;
newProgress.partitionIdToOffset.entrySet().stream()
.forEach(entity -> this.partitionIdToOffset.put(entity.getKey(), entity.getValue() + 1));
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeInt(partitionIdToOffset.size());
for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) {
out.writeInt((Integer) entry.getKey());
out.writeLong((Long) entry.getValue());
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int size = in.readInt();
partitionIdToOffset = new HashMap<>();
for (int i = 0; i < size; i++) {
partitionIdToOffset.put(in.readInt(), in.readLong());
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.