text stringlengths 1 1.05M |
|---|
<reponame>maximusofsky/spring-boot
/*
* Copyright 2007-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springbyexample.orm.hibernate3.dao;
import java.sql.SQLException;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.transaction.AfterTransaction;
import org.springframework.test.context.transaction.TransactionConfiguration;
import org.springframework.transaction.annotation.Transactional;
/**
* Tests transactions using Spring's transaction unit test
* framework. The spring configuration doesn't use
* <tx:annotation-driven/> so the <code>@Transactional</code>
* annotation in the <code>PersonDaoImpl</code> class isn't used.
*
* @author <NAME>
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration
@TransactionConfiguration
@Transactional
public class PersonDaoTransactionUnitRollbackTest extends PersonDaoTransactionUnitTest {

    /** Per-class constant, so declared {@code private static final} (SLF4J convention). */
    private static final Logger logger = LoggerFactory.getLogger(PersonDaoTransactionUnitRollbackTest.class);

    /**
     * Tests the person table and changes the first record's last name.
     * The default for the class is to roll back, but {@code @Rollback(false)}
     * makes this one test method commit its transaction instead.
     */
    @Test
    @Rollback(false)
    public void testHibernateTemplate() throws SQLException {
        super.testHibernateTemplate();
    }

    /**
     * Runs after the transaction has completed; verifies that the committed
     * change (the new last name) is visible outside the transaction.
     */
    @AfterTransaction
    public void afterTransaction() {
        testPerson(false, CHANGED_LAST_NAME);
    }
}
|
<gh_stars>0
import { createSelector } from 'reselect';

// NOTE(review): reaching into Immutable internals (_root.entries) is fragile —
// presumably this mirrors the store layout; confirm against the reducer setup.
const getRestaurants = state => state._root.entries[0][1].restaurantList;

/**
 * Selects the restaurant with the lowest averageDishPrice.
 *
 * Fixed: the previous implementation used Array#sort, which mutates the
 * array in place — silently mutating the (supposedly immutable) Redux
 * state — and does O(n log n) work where a single O(n) scan suffices.
 * Returns undefined when the list is missing or empty (same as before).
 */
export const getBestDeal = createSelector([getRestaurants], restaurants => {
  if (restaurants) {
    return restaurants.reduce(
      (best, r) => (r.averageDishPrice < best.averageDishPrice ? r : best),
      restaurants[0],
    );
  }
});
|
<gh_stars>1-10
package cn.pasteme.common.utils.exception;
import cn.pasteme.common.utils.result.ResponseCode;
import lombok.Getter;
/**
 * Application-wide runtime exception carrying a {@link ResponseCode}.
 * The exception message is the response code's {@code toString()} form.
 *
 * @author Lucien, 白振宇
 * @version 1.0.0
 */
@Getter
public class GlobalException extends RuntimeException {

    /** Assigned exactly once in the constructor, hence {@code final}. */
    private final ResponseCode responseCode;

    /**
     * @param responseCode the code describing this failure; its string form
     *                     becomes the exception message
     */
    public GlobalException(ResponseCode responseCode) {
        super(responseCode.toString());
        this.responseCode = responseCode;
    }
}
|
<filename>vseditors/src/main/java/org/museautomation/ui/valuesource/ValueSourceEditorListener.java<gh_stars>0
package org.museautomation.ui.valuesource;
import org.museautomation.core.values.*;
/**
 * Listener interface for observing changes to a {@link ValueSourceEditor}.
 *
 * @author <NAME> (see LICENSE.txt for license details)
 */
public interface ValueSourceEditorListener
    {
    /**
     * Called when the edited value source configuration changes.
     *
     * @param editor    the editor in which the change happened
     * @param old_value the configuration before the change
     * @param new_value the configuration after the change
     */
    void sourceChanged(ValueSourceEditor editor, ValueSourceConfiguration old_value, ValueSourceConfiguration new_value);
    }
|
class QuickSort {
static int partition(int arr[], int low, int high) {
int pivot = arr[high];
int i = (low - 1);
for (int j = low; j <= high - 1; j++) {
if (arr[j] <= pivot) {
i++;
int temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
}
}
int temp = arr[i + 1];
arr[i + 1] = arr[high];
arr[high] = temp;
return i + 1;
}
static void quickSort(int arr[], int low, int high) {
if (low < high) {
int pi = partition(arr, low, high);
quickSort(arr, low, pi - 1);
quickSort(arr, pi + 1, high);
}
}
public static void main(String args[]) {
int[] arr = {3, 9, 5, 4, 1, 7, 0, 6, 2, 8};
int n = arr.length;
QuickSort obj = new QuickSort();
obj.quickSort(arr, 0, n - 1);
for (int i = 0; i < n; ++i)
System.out.print(arr[i] + " ");
}
} |
package de.unistuttgart.ims.coref.annotator.document;
import java.util.Map;
import java.util.prefs.Preferences;
import org.apache.commons.collections4.multimap.HashSetValuedHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.uima.cas.Feature;
import org.apache.uima.cas.FeatureStructure;
import org.apache.uima.fit.factory.AnnotationFactory;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.cas.FSArray;
import org.apache.uima.jcas.cas.StringArray;
import org.apache.uima.jcas.tcas.Annotation;
import org.eclipse.collections.api.list.ImmutableList;
import org.eclipse.collections.api.list.MutableList;
import org.eclipse.collections.api.multimap.list.MutableListMultimap;
import org.eclipse.collections.api.multimap.set.MutableSetMultimap;
import org.eclipse.collections.api.set.ImmutableSet;
import org.eclipse.collections.api.set.MutableSet;
import org.eclipse.collections.api.set.sorted.ImmutableSortedSet;
import org.eclipse.collections.impl.factory.Lists;
import org.eclipse.collections.impl.factory.Maps;
import org.eclipse.collections.impl.factory.Multimaps;
import org.eclipse.collections.impl.factory.Sets;
import org.eclipse.collections.impl.factory.SortedSets;
import de.unistuttgart.ims.coref.annotator.Annotator;
import de.unistuttgart.ims.coref.annotator.ColorProvider;
import de.unistuttgart.ims.coref.annotator.Constants;
import de.unistuttgart.ims.coref.annotator.Defaults;
import de.unistuttgart.ims.coref.annotator.RangedHashSetValuedHashMap;
import de.unistuttgart.ims.coref.annotator.Span;
import de.unistuttgart.ims.coref.annotator.Util;
import de.unistuttgart.ims.coref.annotator.api.v1.Comment;
import de.unistuttgart.ims.coref.annotator.api.v1.DetachedMentionPart;
import de.unistuttgart.ims.coref.annotator.api.v1.Entity;
import de.unistuttgart.ims.coref.annotator.api.v1.EntityGroup;
import de.unistuttgart.ims.coref.annotator.api.v1.Mention;
import de.unistuttgart.ims.coref.annotator.document.Event.Type;
import de.unistuttgart.ims.coref.annotator.document.op.AddEntityToEntityGroup;
import de.unistuttgart.ims.coref.annotator.document.op.AddMentionsToEntity;
import de.unistuttgart.ims.coref.annotator.document.op.AddMentionsToNewEntity;
import de.unistuttgart.ims.coref.annotator.document.op.AttachPart;
import de.unistuttgart.ims.coref.annotator.document.op.CoreferenceModelOperation;
import de.unistuttgart.ims.coref.annotator.document.op.GroupEntities;
import de.unistuttgart.ims.coref.annotator.document.op.MergeEntities;
import de.unistuttgart.ims.coref.annotator.document.op.MoveMentionPartToMention;
import de.unistuttgart.ims.coref.annotator.document.op.MoveMentionsToEntity;
import de.unistuttgart.ims.coref.annotator.document.op.Operation;
import de.unistuttgart.ims.coref.annotator.document.op.RemoveDuplicateMentionsInEntities;
import de.unistuttgart.ims.coref.annotator.document.op.RemoveEntities;
import de.unistuttgart.ims.coref.annotator.document.op.RemoveEntitiesFromEntityGroup;
import de.unistuttgart.ims.coref.annotator.document.op.RemoveMention;
import de.unistuttgart.ims.coref.annotator.document.op.RemovePart;
import de.unistuttgart.ims.coref.annotator.document.op.RemoveSingletons;
import de.unistuttgart.ims.coref.annotator.document.op.RenameAllEntities;
import de.unistuttgart.ims.coref.annotator.document.op.ToggleGenericFlag;
import de.unistuttgart.ims.coref.annotator.document.op.UpdateEntityColor;
import de.unistuttgart.ims.coref.annotator.document.op.UpdateEntityKey;
import de.unistuttgart.ims.coref.annotator.document.op.UpdateEntityName;
import de.unistuttgart.ims.coref.annotator.uima.AnnotationComparator;
import de.unistuttgart.ims.uimautil.AnnotationUtil;
/**
 * Class represents the document and the tree view on the document. All
 * annotation happens through this class.
 */
public class CoreferenceModel extends SubModel implements Model {
	/**
	 * A mapping from character positions to the annotations covering them
	 * (used to find which mentions are at a given offset).
	 */
	RangedHashSetValuedHashMap<Annotation> characterPosition2AnnotationMap = new RangedHashSetValuedHashMap<Annotation>();
	/**
	 * Assigns colors to new entities
	 */
	ColorProvider colorMap = new ColorProvider();
	// Comments attached to feature structures.
	HashSetValuedHashMap<FeatureStructure, Comment> comments = new HashSetValuedHashMap<FeatureStructure, Comment>();
	/**
	 * A list of listeners to annotation events
	 */
	MutableList<CoreferenceModelListener> crModelListeners = Lists.mutable.empty();
	// Which mentions belong to which entity (kept in sync with the CAS).
	MutableSetMultimap<Entity, Mention> entityMentionMap = Multimaps.mutable.set.empty();
	// Which entity groups an entity is a member of.
	MutableSetMultimap<Entity, EntityGroup> entityEntityGroupMap = Multimaps.mutable.set.empty();
	// Keyboard shortcut key -> entity.
	Map<Character, Entity> keyMap = Maps.mutable.empty();
	/**
	 * The document (prefer documentModel.getJcas() over this field)
	 */
	@Deprecated
	JCas jcas;
	public CoreferenceModel(DocumentModel documentModel) {
		super(documentModel);
		this.jcas = documentModel.getJcas();
	}
	/**
	 * Create a new entity e and a new mention m, and add m to e. Does not fire any
	 * events.
	 *
	 * @param begin Begin of mention
	 * @param end   End of mention
	 * @return The new mention
	 */
	private Mention add(int begin, int end) {
		Annotator.logger.entry(begin, end);
		// document model
		Mention m = createMention(begin, end);
		// the mention's covered text becomes the new entity's label
		Entity e = createEntity(m.getCoveredText());
		m.setEntity(e);
		entityMentionMap.put(e, m);
		return m;
	}
	/** Convenience overload of {@link #add(int, int)} taking a {@link Span}. */
	private Mention add(Span selection) {
		return add(selection.begin, selection.end);
	}
	/**
	 * Registers a listener and immediately sends it an Init event so it can
	 * build its initial state.
	 *
	 * @param e the listener to register
	 * @return true if the listener was added
	 */
	public boolean addCoreferenceModelListener(CoreferenceModelListener e) {
		e.entityEvent(Event.get(this, Event.Type.Init));
		return crModelListeners.add(e);
	}
	/**
	 * Creates a mention over [begin, end) and attaches it to an existing
	 * entity. Does not fire events.
	 *
	 * @param e     the entity the new mention belongs to
	 * @param begin begin character offset
	 * @param end   end character offset
	 * @return the new mention
	 */
	private Mention addTo(Entity e, int begin, int end) {
		Mention m = createMention(begin, end);
		m.setEntity(e);
		entityMentionMap.put(e, m);
		return m;
	}
	/**
	 * does not fire events
	 *
	 * @param e    the entity the new mention belongs to
	 * @param span the character span of the new mention
	 * @return the new mention
	 */
	private Mention addTo(Entity e, Span span) {
		return addTo(e, span.begin, span.end);
	}
	/**
	 * Creates a detached (discontinuous) part over [begin, end) and links it
	 * bidirectionally to the given mention. Does not fire events.
	 *
	 * @param m     the mention the part belongs to
	 * @param begin begin character offset
	 * @param end   end character offset
	 * @return the new detached part
	 */
	private DetachedMentionPart addTo(Mention m, int begin, int end) {
		// document model
		DetachedMentionPart d = createDetachedMentionPart(begin, end);
		d.setMention(m);
		m.setDiscontinuous(d);
		return d;
	}
	/** Convenience overload taking a {@link Span}. Does not fire events. */
	private DetachedMentionPart addTo(Mention m, Span sp) {
		return addTo(m, sp.begin, sp.end);
	}
	/**
	 * Creates a DetachedMentionPart annotation over [b, e), trimming
	 * surrounding whitespace if the preference is set, and registers it in
	 * the character-position index.
	 */
	protected DetachedMentionPart createDetachedMentionPart(int b, int e) {
		DetachedMentionPart dmp = AnnotationFactory.createAnnotation(jcas, b, e, DetachedMentionPart.class);
		if (getPreferences().getBoolean(Constants.CFG_TRIM_WHITESPACE, true))
			dmp = AnnotationUtil.trim(dmp);
		registerAnnotation(dmp);
		return dmp;
	}
	/**
	 * Creates a new entity with the given label, the next color from the
	 * color provider and an empty flag array, and adds it to the CAS indexes.
	 */
	protected Entity createEntity(String l) {
		Entity e = new Entity(jcas);
		e.setColor(colorMap.getNextColor().getRGB());
		e.setLabel(l);
		e.setFlags(new StringArray(jcas, 0));
		e.addToIndexes();
		return e;
	}
	/**
	 * Creates a new entity group with the given label and room for
	 * initialSize members; the member slots are filled by the caller.
	 */
	protected EntityGroup createEntityGroup(String l, int initialSize) {
		EntityGroup e = new EntityGroup(jcas);
		e.setColor(colorMap.getNextColor().getRGB());
		e.setLabel(l);
		e.setFlags(new StringArray(jcas, 0));
		e.addToIndexes();
		e.setMembers(new FSArray(jcas, initialSize));
		return e;
	}
	/**
	 * Builds a display label for a group from (at most) the first two
	 * non-null member labels, abbreviated to fit the tree width; appends
	 * " + N" for further members.
	 */
	protected String createEntityGroupLabel(ImmutableList<Entity> entityList) {
		String s = entityList.subList(0, 2).select(e -> e.getLabel() != null)
				.collect(
						e -> StringUtils.abbreviate(e.getLabel(), "…", (Constants.UI_MAX_STRING_WIDTH_IN_TREE / 2) - 4))
				.makeString(" " + Annotator.getString(Constants.Strings.ENTITY_GROUP_AND) + " ");
		if (entityList.size() > 2)
			s += " + " + String.valueOf(entityList.size() - 2);
		return s;
	}
	/**
	 * Creates a new mention annotation in the document and adds it to the indexes.
	 * Depending on preferences, the span is trimmed of whitespace and/or
	 * extended to full token boundaries.
	 *
	 * @param b the begin character position
	 * @param e the end character position
	 * @return the created mention
	 */
	protected Mention createMention(int b, int e) {
		Mention m = AnnotationFactory.createAnnotation(jcas, b, e, Mention.class);
		if (getPreferences().getBoolean(Constants.CFG_TRIM_WHITESPACE, Defaults.CFG_TRIM_WHITESPACE))
			m = AnnotationUtil.trim(m);
		if (getPreferences().getBoolean(Constants.CFG_FULL_TOKENS, Defaults.CFG_FULL_TOKENS))
			m = Util.extend(m);
		registerAnnotation(m);
		return m;
	}
	/**
	 * Merges the operation's entities (see {@link #merge(Iterable)}). The
	 * previous entity-to-mentions mapping is stored on the operation so the
	 * merge can be undone.
	 */
	protected void edit(MergeEntities op) {
		MutableSetMultimap<Entity, Mention> currentState = Multimaps.mutable.set.empty();
		op.getEntities().forEach(e -> currentState.putAll(e, entityMentionMap.get(e)));
		op.setPreviousState(currentState.toImmutable());
		op.setEntity(merge(op.getEntities()));
		registerEdit(op);
	}
	/**
	 * Applies the given operation to the model: dispatches on the concrete
	 * operation type, mutates CAS/model state accordingly and fires the
	 * matching events to all listeners. Some branches delegate to the typed
	 * edit(...) overloads.
	 *
	 * @throws UnsupportedOperationException for unknown operation types
	 */
	protected synchronized void edit(CoreferenceModelOperation operation) {
		Annotator.logger.entry(operation);
		if (operation instanceof UpdateEntityName) {
			// rename a single entity
			UpdateEntityName op = (UpdateEntityName) operation;
			op.getEntity().setLabel(op.getNewLabel());
			fireEvent(Event.get(this, Event.Type.Update, op.getEntity()));
		} else if (operation instanceof RemoveDuplicateMentionsInEntities) {
			edit((RemoveDuplicateMentionsInEntities) operation);
		} else if (operation instanceof UpdateEntityKey) {
			// (re)assign a keyboard shortcut key to an entity
			UpdateEntityKey op = (UpdateEntityKey) operation;
			if (op.getNewKey() != null && keyMap.containsKey(op.getNewKey())) {
				// the key was bound to another entity before: unbind it
				Entity prev = keyMap.get(op.getNewKey());
				op.setPreviousOwner(prev);
				prev.setKey(null);
			}
			if (op.getNewKey() == null)
				op.getObjects().getFirst().setKey(null);
			else {
				op.getObjects().getFirst().setKey(op.getNewKey().toString());
				// NOTE(review): the key is set on getObjects().getFirst() but the
				// map stores op.getEntity() — verify these refer to the same entity.
				keyMap.put(op.getNewKey(), op.getEntity());
			}
			if (op.getPreviousOwner() != null)
				fireEvent(Event.get(this, Event.Type.Update, op.getObjects().getFirst(), op.getPreviousOwner()));
			else
				fireEvent(Event.get(this, Event.Type.Update, op.getObjects().getFirst()));
		} else if (operation instanceof UpdateEntityColor) {
			// recolor an entity; mentions are updated too so they get redrawn
			UpdateEntityColor op = (UpdateEntityColor) operation;
			op.getObjects().getFirst().setColor(op.getNewColor());
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects()));
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects().flatCollect(e -> entityMentionMap.get(e))));
		} else if (operation instanceof AddEntityToEntityGroup) {
			// append new (not already contained) entities to a group's FSArray
			AddEntityToEntityGroup op = (AddEntityToEntityGroup) operation;
			MutableList<Entity> oldArr = Util.toList(op.getEntityGroup().getMembers());
			MutableList<Entity> newMembers = Lists.mutable.withAll(op.getEntities());
			newMembers.removeAll(oldArr);
			// keep only the actually-added entities on the op (for undo/events)
			op.setEntities(newMembers.toImmutable());
			FSArray arr = new FSArray(jcas, op.getEntityGroup().getMembers().size() + newMembers.size());
			int i = 0;
			// copy the existing members ...
			for (; i < op.getEntityGroup().getMembers().size(); i++) {
				arr.set(i, op.getEntityGroup().getMembers(i));
			}
			int oldSize = i;
			// ... then append the new ones
			for (; i < arr.size(); i++) {
				arr.set(i, newMembers.get(i - oldSize));
			}
			arr.addToIndexes();
			op.getEntityGroup().removeFromIndexes();
			op.getEntityGroup().setMembers(arr);
			fireEvent(Event.get(this, Event.Type.Add, op.getEntityGroup(), op.getEntities()));
		} else if (operation instanceof AddMentionsToNewEntity) {
			// first span creates the entity, remaining spans attach to it
			AddMentionsToNewEntity op = (AddMentionsToNewEntity) operation;
			MutableList<Mention> ms = Lists.mutable.empty();
			for (Span span : op.getSpans()) {
				if (op.getEntity() == null) {
					Mention fst = add(span);
					ms.add(fst);
					op.setEntity(fst.getEntity());
				} else
					ms.add(addTo(op.getEntity(), span));
			}
			fireEvent(Event.get(this, Event.Type.Add, null, op.getEntity()));
			fireEvent(Event.get(this, Event.Type.Add, op.getEntity(), ms.toImmutable()));
		} else if (operation instanceof AddMentionsToEntity) {
			AddMentionsToEntity op = (AddMentionsToEntity) operation;
			op.setMentions(op.getSpans().collect(sp -> {
				return addTo(op.getEntity(), sp);
			}));
			fireEvent(Event.get(this, Event.Type.Add, op.getEntity(), op.getMentions()));
		} else if (operation instanceof AttachPart) {
			// attach a discontinuous part to an existing mention
			AttachPart op = (AttachPart) operation;
			op.setPart(addTo(op.getMention(), op.getSpan()));
			fireEvent(Event.get(this, Event.Type.Add, op.getMention(), op.getPart()));
		} else if (operation instanceof MoveMentionsToEntity) {
			MoveMentionsToEntity op = (MoveMentionsToEntity) operation;
			op.getMentions().forEach(m -> moveTo(op.getTarget(), m));
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects()));
			fireEvent(op.toEvent());
		} else if (operation instanceof MoveMentionPartToMention) {
			// re-link a detached part from the source to the target mention
			MoveMentionPartToMention op = (MoveMentionPartToMention) operation;
			op.getObjects().forEach(d -> {
				d.setMention(op.getTarget());
				op.getTarget().setDiscontinuous(d);
				op.getSource().setDiscontinuous(null);
			});
			fireEvent(op.toEvent());
			fireEvent(Event.get(this, Event.Type.Move, op.getSource(), op.getTarget(), op.getObjects()));
		} else if (operation instanceof RemoveEntities) {
			// remember group memberships on the op so undo can restore them
			RemoveEntities op = (RemoveEntities) operation;
			op.getFeatureStructures().forEach(e -> {
				if (entityEntityGroupMap.containsKey(e))
					op.entityEntityGroupMap.putAll(e, entityEntityGroupMap.get(e));
				remove(e);
			});
		} else if (operation instanceof RemoveEntitiesFromEntityGroup) {
			RemoveEntitiesFromEntityGroup op = (RemoveEntitiesFromEntityGroup) operation;
			op.getEntities().forEach(e -> removeFrom(op.getEntityGroup(), e));
		} else if (operation instanceof RemovePart) {
			RemovePart op = (RemovePart) operation;
			remove(op.getPart());
			fireEvent(Event.get(this, Type.Remove, op.getMention(), op.getPart()));
		} else if (operation instanceof GroupEntities) {
			// create a group containing all the op's entities
			GroupEntities op = (GroupEntities) operation;
			Annotator.logger.trace("Forming entity group with {}.", op.getEntities());
			EntityGroup eg = createEntityGroup(createEntityGroupLabel(op.getEntities()), op.getEntities().size());
			for (int i = 0; i < op.getEntities().size(); i++) {
				eg.setMembers(i, op.getEntities().get(i));
				entityEntityGroupMap.put(op.getEntities().get(i), eg);
			}
			fireEvent(Event.get(this, Event.Type.Add, null, eg));
			op.setEntityGroup(eg);
		} else if (operation instanceof RemoveMention) {
			edit((RemoveMention) operation);
		} else if (operation instanceof RemoveSingletons) {
			edit((RemoveSingletons) operation);
		} else if (operation instanceof MergeEntities) {
			edit((MergeEntities) operation);
		} else if (operation instanceof ToggleGenericFlag) {
			edit((ToggleGenericFlag) operation);
		} else if (operation instanceof RenameAllEntities) {
			edit((RenameAllEntities) operation);
		} else {
			throw new UnsupportedOperationException();
		}
	}
	/**
	 * Removes duplicate mentions within each given entity. Two mentions
	 * count as duplicates when they cover the same span and either both have
	 * no detached part, or their detached parts cover the same span. Removed
	 * mentions are stored on the operation for undo.
	 */
	protected void edit(RemoveDuplicateMentionsInEntities op) {
		MutableSet<Mention> allRemoved = Sets.mutable.empty();
		op.getEntities().forEach(e -> {
			// mentions already seen, keyed by their span
			MutableListMultimap<Span, Mention> map = Multimaps.mutable.list.empty();
			MutableList<Mention> toRemove = Lists.mutable.empty();
			for (Mention m : entityMentionMap.get(e)) {
				Span s = new Span(m);
				if (map.containsKey(s)) {
					for (Mention m2 : map.get(s)) {
						if (m2.getDiscontinuous() == null && m.getDiscontinuous() == null) {
							// same span, neither has a part: duplicate
							toRemove.add(m);
						} else if (m2.getDiscontinuous() != null && m.getDiscontinuous() != null) {
							Span s1 = new Span(m.getDiscontinuous());
							Span s2 = new Span(m2.getDiscontinuous());
							if (s1.equals(s2)) {
								// parts cover the same span too: duplicate
								toRemove.add(m);
							} else {
								map.put(s, m);
							}
						} else {
							// only one has a part: not a duplicate
							map.put(s, m);
						}
					}
				} else {
					map.put(s, m);
				}
			}
			toRemove.forEach(m -> {
				remove(m, false);
				if (m.getDiscontinuous() != null) {
					DetachedMentionPart dmp = m.getDiscontinuous();
					remove(dmp);
					fireEvent(Event.get(this, Type.Remove, m, dmp));
				}
			});
			fireEvent(Event.get(this, Event.Type.Remove, e, toRemove.toImmutable()));
			allRemoved.addAll(toRemove);
		});
		op.setFeatureStructures(allRemoved.toList().toImmutable());
		registerEdit(op);
	}
	/**
	 * Removes all mentions listed in the operation (plus their detached
	 * parts) and fires the corresponding Remove events.
	 */
	protected void edit(RemoveMention op) {
		op.getFeatureStructures().forEach(m -> {
			remove(m, false);
			if (m.getDiscontinuous() != null) {
				DetachedMentionPart dmp = m.getDiscontinuous();
				remove(dmp);
				fireEvent(Event.get(this, Type.Remove, m, dmp));
			}
		});
		fireEvent(Event.get(this, Event.Type.Remove, op.getEntity(), op.getFeatureStructures()));
		registerEdit(op);
	}
	/**
	 * Removes all entities with zero mentions and all entities with exactly
	 * one mention (singletons). Removed entities and the singleton mentions
	 * are stored on the operation for undo.
	 */
	protected void edit(RemoveSingletons operation) {
		MutableSet<Entity> entities = Sets.mutable.empty();
		MutableSet<Mention> mentions = Sets.mutable.empty();
		// iterate over a snapshot, since remove(...) mutates the indexes
		for (Entity entity : Lists.immutable.withAll(JCasUtil.select(jcas, Entity.class))) {
			ImmutableSet<Mention> ms = getMentions(entity);
			switch (ms.size()) {
			case 0:
				remove(entity);
				entities.add(entity);
				break;
			case 1:
				Mention m = ms.getOnly();
				remove(m.getEntity());
				mentions.add(m);
				break;
			default:
				break;
			}
		}
		operation.setFeatureStructures(entities.toList().toImmutable());
		operation.setMentions(mentions.toList().toImmutable());
		// fireEvent(null); // TODO
		registerEdit(operation);
	}
	/**
	 * Relabels every entity from one of its mentions, chosen by the
	 * operation's strategy (FIRST, LAST, or LONGEST mention). The old names
	 * are recorded on the operation for undo.
	 */
	protected void edit(RenameAllEntities operation) {
		for (Entity entity : entityMentionMap.keySet()) {
			Mention nameGiver;
			switch (operation.getStrategy()) {
			case LAST:
				nameGiver = entityMentionMap.get(entity).maxBy(m -> m.getBegin());
				break;
			case LONGEST:
				nameGiver = entityMentionMap.get(entity).maxBy(m -> m.getEnd() - m.getBegin());
				break;
			case FIRST:
			default:
				nameGiver = entityMentionMap.get(entity).minBy(m -> m.getBegin());
				break;
			}
			operation.registerOldName(entity, getLabel(entity));
			String newName = nameGiver.getCoveredText();
			entity.setLabel(newName);
		}
		// NOTE(review): unlike the other edit(...) overloads, this one does not
		// call registerEdit(operation) — confirm whether that is intentional.
		fireEvent(Event.get(this, Event.Type.Update, operation.getOldNames().keySet()));
	}
	/**
	 * Toggles a string flag on each target feature structure: removes it if
	 * present, adds it otherwise. Applying the same operation twice restores
	 * the previous state, so this method also serves as its own undo.
	 */
	protected void edit(ToggleGenericFlag operation) {
		MutableSet<FeatureStructure> featureStructures = Sets.mutable.empty();
		operation.getObjects().forEach(fs -> {
			Feature feature = fs.getType().getFeatureByBaseName("Flags");
			featureStructures.add(fs);
			if (Util.isX(fs, operation.getFlag())) {
				// flag currently set: remove it
				fs.setFeatureValue(feature,
						Util.removeFrom(jcas, (StringArray) fs.getFeatureValue(feature), operation.getFlag()));
			} else {
				// flag not set: add it
				fs.setFeatureValue(feature,
						Util.addTo(jcas, (StringArray) fs.getFeatureValue(feature), operation.getFlag()));
			}
		});
		fireEvent(Event.get(this, Event.Type.Update, operation.getObjects()));
		fireEvent(Event.get(this, Event.Type.Update, featureStructures));
		registerEdit(operation);
	}
	/** Delivers an event to all registered listeners. */
	protected void fireEvent(FeatureStructureEvent event) {
		crModelListeners.forEach(l -> l.entityEvent(event));
	}
	/** Returns an immutable snapshot of the entity's mentions. */
	public ImmutableSet<Mention> get(Entity entity) {
		return entityMentionMap.get(entity).toImmutable();
	}
	@Override
	public DocumentModel getDocumentModel() {
		return documentModel;
	}
	/** Returns the JCas via the document model (preferred over the deprecated field). */
	public JCas getJCas() {
		return documentModel.getJcas();
	}
	/** Returns the live shortcut-key-to-entity map (not a copy). */
	public Map<Character, Entity> getKeyMap() {
		return keyMap;
	}
public String getLabel(Entity entity) {
if (entity.getLabel() != null)
return entity.getLabel();
return get(entity).collect(m -> m.getCoveredText()).maxBy(s -> s.length());
}
	/** All mentions in the document, ordered by the annotation comparator. */
	public ImmutableSortedSet<Mention> getMentions() {
		return SortedSets.immutable.withAll(new AnnotationComparator(), JCasUtil.select(getJCas(), Mention.class));
	}
	/** Same as {@link #get(Entity)}: the entity's mentions as an immutable set. */
	public ImmutableSet<Mention> getMentions(Entity entity) {
		return entityMentionMap.get(entity).toImmutable();
	}
	/**
	 * Returns a mention at or after the given character position, or null if
	 * there is none. If several mentions cover the first matching position,
	 * an arbitrary one is returned (set iteration order).
	 */
	public Mention getNextMention(int position) {
		for (int i = position; i < getDocumentModel().getJcas().getDocumentText().length(); i++) {
			MutableSet<Mention> mentions = characterPosition2AnnotationMap.get(i).selectInstancesOf(Mention.class);
			if (!mentions.isEmpty())
				return mentions.iterator().next();
		}
		return null;
	}
	/**
	 * Returns a mention strictly before the given character position, or
	 * null if there is none (counterpart to {@link #getNextMention(int)}).
	 */
	public Mention getPreviousMention(int position) {
		for (int i = position - 1; i >= 0; i--) {
			MutableSet<Mention> mentions = characterPosition2AnnotationMap.get(i).selectInstancesOf(Mention.class);
			if (!mentions.isEmpty())
				return mentions.iterator().next();
		}
		return null;
	}
	/**
	 * Retrieve all annotations that cover the current character position
	 *
	 * @param position The character position
	 * @return A collection of annotations (live set from the index, not a copy)
	 */
	public MutableSet<Annotation> getMentions(int position) {
		return this.characterPosition2AnnotationMap.get(position);
	}
public ImmutableSet<Annotation> getMentions(int start, int end) {
MutableSet<Annotation> mentions = Sets.mutable.empty();
for (int i = start; i <= end; i++) {
mentions.addAll(characterPosition2AnnotationMap.get(i).select(a -> a instanceof Mention));
}
return mentions.toImmutable();
}
	/** Preferences are owned by the document model. */
	public Preferences getPreferences() {
		return documentModel.getPreferences();
	}
@Override
protected void initializeOnce() {
for (Entity entity : JCasUtil.select(documentModel.getJcas(), Entity.class)) {
if (entity.getKey() != null)
keyMap.put(new Character(entity.getKey().charAt(0)), entity);
}
for (Mention mention : JCasUtil.select(documentModel.getJcas(), Mention.class)) {
entityMentionMap.put(mention.getEntity(), mention);
mention.getEntity().addToIndexes();
registerAnnotation(mention);
if (mention.getDiscontinuous() != null) {
registerAnnotation(mention.getDiscontinuous());
}
}
}
@Deprecated
public void initialPainting() {
if (initialized)
return;
for (Entity entity : JCasUtil.select(jcas, Entity.class)) {
fireEvent(Event.get(this, Event.Type.Add, null, entity));
if (entity.getKey() != null)
keyMap.put(new Character(entity.getKey().charAt(0)), entity);
}
for (Mention mention : JCasUtil.select(jcas, Mention.class)) {
entityMentionMap.put(mention.getEntity(), mention);
mention.getEntity().addToIndexes();
registerAnnotation(mention);
fireEvent(Event.get(this, Event.Type.Add, mention.getEntity(), mention));
if (mention.getDiscontinuous() != null) {
registerAnnotation(mention.getDiscontinuous());
fireEvent(Event.get(this, Event.Type.Add, mention, mention.getDiscontinuous()));
}
}
initialized = true;
}
	/**
	 * Merges all given entities into the one with the most mentions: moves
	 * every mention to the survivor, fires Move and Remove events, and
	 * removes the dissolved entities from the indexes.
	 *
	 * @param nodes the entities to merge
	 * @return the surviving entity, or null if no entity had any mentions
	 */
	private Entity merge(Iterable<Entity> nodes) {
		// find the entity with the most mentions
		Entity biggest = null;
		int size = 0;
		for (Entity n : nodes) {
			if (entityMentionMap.get(n).size() > size) {
				size = entityMentionMap.get(n).size();
				biggest = n;
			}
		}
		final Entity tgt = biggest;
		if (biggest != null)
			for (Entity n : nodes) {
				if (n != tgt) {
					// announce the move first, then actually re-link the mentions
					fireEvent(Event.get(this, Event.Type.Move, n, tgt, entityMentionMap.get(n).toList().toImmutable()));
					fireEvent(Event.get(this, Event.Type.Remove, n));
					entityMentionMap.get(n).toSet().forEach(m -> moveTo(tgt, m));
					entityMentionMap.removeAll(n);
					n.removeFromIndexes();
				}
			}
		return biggest;
	}
	/**
	 * does not fire events
	 *
	 * @param newEntity the entity the mentions are moved to
	 * @param mentions  the mentions to move
	 */
	private void moveTo(Entity newEntity, Mention... mentions) {
		Entity oldEntity = null;
		for (Mention m : mentions) {
			oldEntity = m.getEntity();
			m.setEntity(newEntity);
			entityMentionMap.remove(oldEntity, m);
			entityMentionMap.put(newEntity, m);
		}
	}
	/** Adds the annotation to the character-position index. */
	public void registerAnnotation(Annotation a) {
		characterPosition2AnnotationMap.add(a);
	}
	// NOTE(review): the operation parameter is unused; the method only marks
	// the document as changed. Presumably undo bookkeeping happens elsewhere —
	// confirm before relying on this for history tracking.
	private void registerEdit(Operation operation) {
		documentModel.fireDocumentChangedEvent();
	}
	/**
	 * does not fire events
	 *
	 * @param dmp the detached part to remove from the indexes
	 */
	private void remove(DetachedMentionPart dmp) {
		dmp.removeFromIndexes();
		characterPosition2AnnotationMap.remove(dmp);
	};
	/**
	 * Removes entity and fires events
	 *
	 * @param entity the entity to remove, together with all its mentions
	 */
	private void remove(Entity entity) {
		// announce removal of the mentions first ...
		fireEvent(Event.get(this, Event.Type.Remove, entity, entityMentionMap.get(entity).toList().toImmutable()));
		for (Mention m : entityMentionMap.get(entity)) {
			characterPosition2AnnotationMap.remove(m);
			m.removeFromIndexes();
			// TODO: remove parts
		}
		// ... then detach the entity from any groups it belongs to ...
		for (EntityGroup group : entityEntityGroupMap.get(entity)) {
			group.setMembers(Util.removeFrom(jcas, group.getMembers(), entity));
		}
		entityEntityGroupMap.removeAll(entity);
		// ... and finally remove the entity itself
		fireEvent(Event.get(this, Event.Type.Remove, null, entity));
		entityMentionMap.removeAll(entity);
		entity.removeFromIndexes();
	}
	/**
	 * Removes a mention from all indexes; does not fire events. If
	 * autoRemove is set, this was the entity's last mention, and the
	 * preference allows it, the now-empty entity is removed as well.
	 */
	private void remove(Mention m, boolean autoRemove) {
		Entity entity = m.getEntity();
		characterPosition2AnnotationMap.remove(m);
		entityMentionMap.remove(entity, m);
		m.removeFromIndexes();
		if (autoRemove && entityMentionMap.get(entity).isEmpty() && getPreferences()
				.getBoolean(Constants.CFG_DELETE_EMPTY_ENTITIES, Defaults.CFG_DELETE_EMPTY_ENTITIES)) {
			remove(entity);
		}
	}
	/** Unregisters a listener; returns true if it was registered. */
	public boolean removeCoreferenceModelListener(Object o) {
		return crModelListeners.remove(o);
	}
/**
* TODO: this could have a unit test
*
* @param eg
* @param entity
*/
private void removeFrom(EntityGroup eg, Entity entity) {
FSArray oldArray = eg.getMembers();
FSArray arr = new FSArray(jcas, eg.getMembers().size() - 1);
for (int i = 0, j = 0; i < oldArray.size() - 1 && j < arr.size() - 1; i++, j++) {
if (eg.getMembers(i) == entity) {
i++;
}
arr.set(j, eg.getMembers(i));
}
eg.setMembers(arr);
fireEvent(Event.get(this, Event.Type.Remove, eg, entity));
}
	/**
	 * Reverts the given operation: dispatches on the concrete operation type
	 * and restores the state recorded on the operation when it was applied,
	 * firing the matching events. Mirrors {@link #edit(CoreferenceModelOperation)}.
	 */
	protected void undo(CoreferenceModelOperation operation) {
		Annotator.logger.entry(operation);
		if (operation instanceof UpdateEntityName) {
			// restore the old label
			UpdateEntityName op = (UpdateEntityName) operation;
			op.getEntity().setLabel(op.getOldLabel());
			fireEvent(Event.get(this, Event.Type.Update, op.getEntity()));
		} else if (operation instanceof UpdateEntityKey) {
			UpdateEntityKey op = (UpdateEntityKey) operation;
			// give the new key back to its previous owner (or unbind it)
			if (op.getPreviousOwner() != null) {
				op.getPreviousOwner().setKey(op.getNewKey().toString());
				keyMap.put(op.getNewKey(), op.getPreviousOwner());
			} else {
				keyMap.remove(op.getNewKey());
			}
			// restore the entity's old key (if it had one)
			if (op.getOldKey() != null) {
				op.getEntity().setKey(op.getOldKey().toString());
				keyMap.put(op.getOldKey(), op.getEntity());
			} else {
				op.getEntity().setKey(null);
			}
			if (op.getPreviousOwner() != null)
				fireEvent(Event.get(this, Event.Type.Update, op.getObjects().getFirst(), op.getPreviousOwner()));
			else
				fireEvent(Event.get(this, Event.Type.Update, op.getObjects().getFirst()));
		} else if (operation instanceof ToggleGenericFlag) {
			// toggling twice restores the previous state, so edit() is its own undo
			edit((ToggleGenericFlag) operation);
		} else if (operation instanceof UpdateEntityColor) {
			UpdateEntityColor op = (UpdateEntityColor) operation;
			op.getObjects().getFirst().setColor(op.getOldColor());
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects()));
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects().flatCollect(e -> entityMentionMap.get(e))));
		} else if (operation instanceof AddEntityToEntityGroup) {
			AddEntityToEntityGroup op = (AddEntityToEntityGroup) operation;
			op.getEntities().forEach(e -> removeFrom(op.getEntityGroup(), e));
		} else if (operation instanceof AddMentionsToNewEntity) {
			// the entity was created by the operation, so removing it undoes everything
			AddMentionsToNewEntity op = (AddMentionsToNewEntity) operation;
			remove(op.getEntity());
		} else if (operation instanceof AddMentionsToEntity) {
			AddMentionsToEntity op = (AddMentionsToEntity) operation;
			op.getMentions().forEach(m -> remove(m, false));
			fireEvent(Event.get(this, Event.Type.Remove, op.getEntity(), op.getMentions()));
		} else if (operation instanceof AttachPart) {
			AttachPart op = (AttachPart) operation;
			remove(op.getPart());
			fireEvent(Event.get(this, Event.Type.Remove, op.getMention(), op.getPart()));
		} else if (operation instanceof MoveMentionPartToMention) {
			// re-link the detached part back to the source mention
			MoveMentionPartToMention op = (MoveMentionPartToMention) operation;
			op.getObjects().forEach(d -> {
				op.getSource().setDiscontinuous(d);
				d.setMention(op.getSource());
				op.getTarget().setDiscontinuous(null);
			});
			fireEvent(op.toReversedEvent());
		} else if (operation instanceof MoveMentionsToEntity) {
			MoveMentionsToEntity op = (MoveMentionsToEntity) operation;
			op.getMentions().forEach(m -> moveTo(op.getSource(), m));
			fireEvent(Event.get(this, Event.Type.Update, op.getObjects()));
			fireEvent(op.toReversedEvent());
		} else if (operation instanceof RemoveDuplicateMentionsInEntities) {
			// re-index the removed duplicates and redraw them
			RemoveDuplicateMentionsInEntities op = (RemoveDuplicateMentionsInEntities) operation;
			op.getFeatureStructures().forEach(m -> {
				m.addToIndexes();
				entityMentionMap.put(m.getEntity(), m);
				registerAnnotation(m);
				fireEvent(Event.get(this, Type.Add, m.getEntity(), m));
			});
		} else if (operation instanceof RemovePart) {
			RemovePart op = (RemovePart) operation;
			op.getPart().setMention(op.getMention());
			op.getMention().setDiscontinuous(op.getPart());
			fireEvent(Event.get(this, Type.Add, op.getMention(), op.getPart()));
		} else if (operation instanceof RemoveMention) {
			undo((RemoveMention) operation);
		} else if (operation instanceof RemoveEntities) {
			// re-index the entities and restore their group memberships
			RemoveEntities op = (RemoveEntities) operation;
			op.getFeatureStructures().forEach(e -> {
				e.addToIndexes();
				if (op.entityEntityGroupMap.containsKey(e)) {
					for (EntityGroup group : op.entityEntityGroupMap.get(e))
						group.setMembers(Util.addTo(jcas, group.getMembers(), e));
				}
			});
			fireEvent(Event.get(this, Event.Type.Add, null, op.getFeatureStructures()));
		} else if (operation instanceof RemoveEntitiesFromEntityGroup) {
			// rebuild the member array: old members first, removed ones appended
			RemoveEntitiesFromEntityGroup op = (RemoveEntitiesFromEntityGroup) operation;
			FSArray oldArr = op.getEntityGroup().getMembers();
			FSArray newArr = new FSArray(jcas, oldArr.size() + op.getEntities().size());
			int i = 0;
			for (; i < oldArr.size(); i++) {
				newArr.set(i, oldArr.get(i));
			}
			for (; i < newArr.size(); i++) {
				newArr.set(i, op.getEntities().get(i - oldArr.size()));
			}
			op.getEntityGroup().setMembers(newArr);
			newArr.addToIndexes();
			oldArr.removeFromIndexes();
		} else if (operation instanceof RemoveSingletons) {
			undo((RemoveSingletons) operation);
		} else if (operation instanceof MergeEntities) {
			// re-create the dissolved entities and move their mentions back
			MergeEntities op = (MergeEntities) operation;
			for (Entity oldEntity : op.getEntities()) {
				if (op.getEntity() != oldEntity) {
					oldEntity.addToIndexes();
					fireEvent(Event.get(this, Event.Type.Add, null, oldEntity));
					for (Mention m : op.getPreviousState().get(oldEntity)) {
						moveTo(oldEntity, m);
					}
					fireEvent(Event.get(this, Type.Move, null, oldEntity,
							op.getPreviousState().get(oldEntity).toList().toImmutable()));
				}
			}
		} else if (operation instanceof GroupEntities) {
			GroupEntities op = (GroupEntities) operation;
			remove(op.getEntityGroup());
			op.getEntities().forEach(e -> entityEntityGroupMap.remove(e, op.getEntityGroup()));
			fireEvent(Event.get(this, Event.Type.Remove, null, op.getEntityGroup()));
		} else if (operation instanceof RenameAllEntities) {
			undo((RenameAllEntities) operation);
		}
	}
/**
 * Reverts a {@code RemoveMention} operation: re-adds the removed mention
 * annotations (and any discontinuous parts) to the CAS indexes and the local
 * lookup maps, then fires Add events so the UI re-draws them.
 */
private void undo(RemoveMention op) {
    // re-create all mentions and set them to the op
    op.getFeatureStructures().forEach(m -> {
        m.addToIndexes();
        m.setEntity(op.getEntity());
        entityMentionMap.put(op.getEntity(), m);
        characterPosition2AnnotationMap.add(m);
        // restore the discontinuous part as well, if the mention had one
        if (m.getDiscontinuous() != null) {
            m.getDiscontinuous().addToIndexes();
            characterPosition2AnnotationMap.add(m.getDiscontinuous());
        }
    });
    // fire event to draw them
    fireEvent(Event.get(this, Event.Type.Add, op.getEntity(), op.getFeatureStructures()));
    // re-create attached parts (if any)
    op.getFeatureStructures().select(m -> m.getDiscontinuous() != null)
        .forEach(m -> fireEvent(Event.get(this, Event.Type.Add, m, m.getDiscontinuous())));
}
/**
 * Reverts a {@code RemoveSingletons} operation: restores each removed
 * singleton entity together with its single mention, firing one Add event per
 * entity and per mention, plus a final Add for the whole batch.
 */
private void undo(RemoveSingletons op) {
    // put the removed entities themselves back into the indexes first
    op.getFeatureStructures().forEach(e -> e.addToIndexes());
    op.getMentions().forEach(m -> {
        entityMentionMap.put(m.getEntity(), m);
        characterPosition2AnnotationMap.add(m);
        m.addToIndexes();
        // re-adding the entity here is idempotent for entities already restored above
        m.getEntity().addToIndexes();
        fireEvent(Event.get(this, Event.Type.Add, null, m.getEntity()));
        fireEvent(Event.get(this, Event.Type.Add, m.getEntity(), m));
    });
    fireEvent(Event.get(this, Event.Type.Add, null, op.getFeatureStructures()));
}
/**
 * Reverts a {@code RenameAllEntities} operation by restoring each entity's
 * previously recorded label, then fires a single Update event for all of them.
 */
protected void undo(RenameAllEntities operation) {
    for (Entity entity : operation.getOldNames().keySet()) {
        entity.setLabel(operation.getOldNames().get(entity));
    }
    fireEvent(Event.get(this, Event.Type.Update, operation.getOldNames().keySet()));
}
}
|
/// <summary>
/// Generic pass-through invoker: <see cref="Invoke"/> returns its argument unchanged.
/// </summary>
public class Invoker<T>
{
    /// <summary>Returns <paramref name="val"/> exactly as received.</summary>
    public T Invoke(T val)
    {
        var result = val;
        return result;
    }
}
<filename>stack/Stack.js
// LIFO stack backed by a plain array plus an explicit element counter.
// Mutating operations log what they did; push returns the storage index.
class Stack {
  constructor() {
    this.items = []
    this.count = 0
  }

  // Stores data on top of the stack; returns the index it was placed at.
  push(data) {
    const index = this.count
    this.items[index] = data
    this.count = index + 1
    console.log(`${data} added to ${this.count}`)
    return index
  }

  // Removes and returns the top element, or undefined when empty.
  pop() {
    if (this.isEmpty()) {
      return undefined
    }
    this.count -= 1
    const item = this.items[this.count]
    this.items.length = this.count
    console.log(`${item} removed`)
    return item
  }

  // Returns the top element without removing it, or undefined when empty.
  peek() {
    if (this.isEmpty()) {
      return undefined
    }
    const top = this.items[this.count - 1]
    console.log(`Returning ${top}`)
    return top
  }

  isEmpty() {
    return this.count === 0
  }

  size() {
    return this.count
  }

  // Logs and returns a space-terminated listing of the elements, bottom first.
  print() {
    const parts = []
    for (let i = 0; i < this.count; i++) {
      parts.push(this.items[i] + ' ')
    }
    const str = parts.join('')
    console.log(str)
    return str
  }

  // Empties the stack and returns the (new, empty) backing array.
  clear() {
    this.items = []
    this.count = 0
    return this.items
  }
}
// Demo on module load: push three values, print the contents, and dump the
// instance; then expose the class for consumers.
const stack = new Stack()
stack.push(100)
stack.push(200)
stack.push(300)
stack.print()
console.log(stack)
module.exports = Stack
<gh_stars>1-10
// Value object mirroring a FactuPro "product" record as returned by the API.
export default class FactuProProductVO {

    public static API_TYPE_ID: string = "fp_product";

    // The id is supplied by the API
    public id: number;
    public _type: string = FactuProProductVO.API_TYPE_ID;

    // Internal reference
    public ref: string;
    // Label
    public title: string;
    // Unit price excluding tax
    public unit_price: number;
    // VAT rate
    public vat: number;
    // Unit of measure
    public measure: string;
    // Billing type
    public nature: number;
    // Category
    public category_id: number;
    // Internal notes
    public notes: string;
    // Created on
    public created_at: Date;
    // Updated on
    public updated_at: Date;
    // Record flagged for deletion (soft delete)
    public soft_deleted: boolean;
    // Date of permanent (hard) deletion
    public hard_delete_on: Date;
    // API - free-form numeric reference
    public api_id: number;
    // API - free-form text
    public api_custom: string;
}
#include "src/fw.h"
// GLFW key handler: pressing ESC requests window close; every other key
// (and every non-press ESC event) is ignored.
void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods) {
    if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS) {
        glfwSetWindowShouldClose(window, GL_TRUE);
    }
}
// Per-frame render callback: clear the color buffer to black (alpha 0).
void renderFunction() {
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT);
}
// Entry point: create the framework window, install the key handler, and run
// the render loop until the window is asked to close.
int main(int argc, char** argv) {
    FW fw("Basic Sample");
    fw.setKeyCallback(keyCallback);
    fw.runMainLoop(renderFunction);
    return 0;
}
|
#!/bin/bash
# Generates a makefile include ($1) containing per-MCPU ($2) build rules for
# every entry of the SOURCES file. SOURCES lines have the form
#   <SOURCE_ROOT_VAR>:<relative/path>
# where SOURCE_ROOT_VAR names a make variable pointing at the source tree.
incfile=$1
MCPU=$2
# Start the include file with a generated-file banner (first echo truncates).
echo " " >$incfile
echo "# $incfile: Generated from SOURCES file " >>$incfile
echo "# for MCPU = $MCPU " >>$incfile
echo " " >>$incfile
# Emit one .bc compile rule per source: .cl files go through LLVM_CC,
# everything else (LLVM IR) through LLVM_AS.
for pair in `cat SOURCES` ; do
i="${pair##*:}"
source_type="${pair%:*}"
typefile="${i##*.}"
filename="${i##*/}"
dirname="${i%/*}"
echo "\$(BCDIR)/${i}.$MCPU.bc: \$($source_type)/${i}" >>$incfile
if [ $typefile == "cl" ] ; then
echo "	@echo CL $source_type: $i -mcpu=$MCPU" >>$incfile
echo "	\$(Verb) mkdir -p \$(BCDIR)/$dirname" >>$incfile
echo "	\$(Verb) mkdir -p \$(DEPSDIR)/$dirname" >>$incfile
echo "	\$(Verb) \$(LLVM_CC) \$(CLFLAGS) -mcpu=$MCPU -I\$($source_type)/$dirname -MMD -MF \$(DEPSDIR)/${i}.d -o \$(BCDIR)/$i.$MCPU.bc \$($source_type)/${i}" >> $incfile
else
echo "	@echo AS $source_type: $i -mcpu=$MCPU" >>$incfile
echo "	\$(Verb) mkdir -p \$(BCDIR)/$dirname " >>$incfile
echo "	\$(Verb) \$(LLVM_AS) -o \$(BCDIR)/$i.$MCPU.bc \$($source_type)/${i}" >>$incfile
fi
echo " " >>$incfile
done
echo " " >>$incfile
echo "# The big list of needed bc files to link together " >>$incfile
# Build the OBJ<MCPU> variable listing every generated .bc file.
objline="OBJ$MCPU ="
for pair in `cat SOURCES` ; do
i="${pair##*:}"
objline="${objline} \$(BCDIR)/${i}.$MCPU.bc"
done
echo "${objline}" >>$incfile
echo " " >>$incfile
# Fixed rules: convert.cl, subnormal_disable.ll, then link, optimize, and
# finalize the library with prepare-builtins.
echo "\$(BCDIR)/convert.$MCPU.bc: \$(UTILDIR)/convert.cl " >>$incfile
echo "	@echo CL convert.cl - $MCPU" >>$incfile
echo "	\$(Verb) mkdir -p \$(BCDIR)" >>$incfile
echo "	\$(Verb) \$(LLVM_CC) \$(CLFLAGS) -mcpu=$MCPU -I\$(LIBAMDGCN)/libclc_overrids/include -I\$(LIBCLC)/generic/lib -MMD -MF \$(DEPSDIR)/convert.cl.d -o \$(BCDIR)/convert.$MCPU.bc \$(UTILDIR)/convert.cl " >>$incfile
echo " " >>$incfile
echo "\$(BCDIR)/subnormal_disable.$MCPU.bc: \$(LIBCLC)/generic/lib/subnormal_disable.ll" >>$incfile
echo "	@echo AS subnormal_disable.ll" >>$incfile
echo "	\$(Verb) \$(LLVM_AS) -o \$(BCDIR)/subnormal_disable.$MCPU.bc \$(LIBCLC)/generic/lib/subnormal_disable.ll" >>$incfile
echo " " >>$incfile
echo "\$(BCDIR)/builtins.link.$MCPU.bc: \$(OBJ$MCPU) \$(BCDIR)/convert.$MCPU.bc " >>$incfile
echo "	@echo LINK all bc files for $MCPU" >>$incfile
echo "	\$(Verb) \$(LLVM_LINK) --suppress-warnings \$(OBJ$MCPU) \$(BCDIR)/convert.$MCPU.bc -o \$(BCDIR)/builtins.link.$MCPU.bc" >>$incfile
echo " " >>$incfile
echo "\$(BCDIR)/builtins.opt.$MCPU.bc: \$(BCDIR)/builtins.link.$MCPU.bc " >>$incfile
echo "	@echo LLVM-OPT -O3" >>$incfile
echo "	\$(Verb) \$(LLVM_OPT) -O3 -o \$(BCDIR)/builtins.opt.$MCPU.bc \$(BCDIR)/builtins.link.$MCPU.bc" >>$incfile
echo " " >>$incfile
echo "\$(BCDIR)/libamdgcn.$MCPU.bc: \$(BCDIR)/builtins.opt.$MCPU.bc \$(UTILDIR)/prepare-builtins" >>$incfile
echo "	@echo LAST STEP! Call prepare-builtins to create libamdgcn.$MCPU.bc">>$incfile
echo "	\$(Verb) \$(UTILDIR)/prepare-builtins -o \$(BCDIR)/libamdgcn.$MCPU.bc \$(BCDIR)/builtins.opt.$MCPU.bc" >>$incfile
echo " " >>$incfile
# Phony top-level target for this MCPU.
echo "$MCPU: \$(UTILDIR)/makefile.$MCPU.inc \$(BCDIR)/libamdgcn.$MCPU.bc \$(BCDIR)/subnormal_disable.$MCPU.bc " >>$incfile
|
// Establishes the shared mongoose connection using the MONGODB_URI env var
// and exports the resulting promise so callers can await DB readiness.
const mongoose = require('mongoose');

const MONGODB_URI = process.env.MONGODB_URI

const db = mongoose
  .connect(MONGODB_URI, {
    useNewUrlParser: true,
    useUnifiedTopology: true,
    // These are used to prevent the deprecation warning
    // for unique values in tables
    useFindAndModify: false,
    useCreateIndex: true
  })
  .then(() => {
    console.log('!~DB Connected~!')
  })
  .catch(e => {
    // connection failure is logged, not rethrown — consumers get a resolved promise
    console.error('Connection error', e.message)
  })

module.exports = db
<reponame>mhk-github/Ruby
#! /usr/bin/env ruby
###############################################################################
# FILE : reward_none.rb
# DESCRIPTION : A class to represent no reward for the customer purchase.
# LICENSE : MIT
###############################################################################
###############################################################################
# IMPORTS
###############################################################################
require_relative "interface_reward"
###############################################################################
# CLASSES
###############################################################################
# Null-object implementation of InterfaceReward: always reports that no
# reward is available for the purchase.
class RewardNone < InterfaceReward
  # A class to represent no reward at all.

  # Fixed reward message (class variable shared by all instances).
  @@reward = "No reward at this time."

  def initialize
  end

  # Returns the reward description string.
  def what
    @@reward
  end
end
###############################################################################
# END
###############################################################################
# Local variables:
# mode: ruby
# End:
|
package cyclops.container.immutable.impl;
import cyclops.container.immutable.ImmutableSet;
import cyclops.container.persistent.PersistentBag;
import cyclops.function.higherkinded.DataWitness.bag;
import cyclops.function.higherkinded.Higher;
import cyclops.reactive.ReactiveSeq;
import java.io.Serializable;
import java.util.Iterator;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import org.reactivestreams.Publisher;
/**
 * Persistent multiset (bag): an immutable collection that records how many
 * times each element occurs. Backed by a persistent HashMap from element to
 * occurrence count, plus a cached total size.
 *
 * NOTE(review): although this implements ImmutableSet, duplicates are
 * retained — stream()/iterator() yield each element once per occurrence.
 */
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public final class Bag<T> implements ImmutableSet<T>, PersistentBag<T>, Higher<bag, T>, Serializable {

    private static final long serialVersionUID = 1L;
    // element -> occurrence count; entries exist only for counts >= 1
    private final HashMap<T, Integer> map;
    // total number of elements, counting duplicates
    private final int size;

    /** Returns the empty bag. */
    public static <T> Bag<T> empty() {
        return new Bag<>(HashMap.empty(),
                         0);
    }

    /** Returns a bag holding exactly one occurrence of {@code value}. */
    public static <T> Bag<T> singleton(T value) {
        return Bag.<T>empty().plus(value);
    }

    /** Returns a bag holding all {@code values}, duplicates retained. */
    public static <T> Bag<T> of(T... values) {
        Bag<T> res = empty();
        for (T next : values) {
            res = res.plus(next);
        }
        return res;
    }

    /** Collects a Stream into a bag, duplicates retained. */
    public static <T> Bag<T> fromStream(Stream<T> values) {
        return ReactiveSeq.fromStream(values)
                          .foldLeft(empty(),
                                    (a, b) -> a.plus(b));
    }

    /** Collects an Iterable into a bag, duplicates retained. */
    public static <T> Bag<T> fromIterable(Iterable<? extends T> values) {
        return ReactiveSeq.fromIterable(values)
                          .foldLeft(empty(),
                                    (a, b) -> a.plus(b));
    }

    /** Number of occurrences of {@code type} in this bag; 0 when absent. */
    public int instances(T type) {
        return map.getOrElse(type,
                             0);
    }

    /** Total number of elements, counting duplicates. */
    public int size() {
        return size;
    }

    /** Alias for {@link #plus(Object)}. */
    @Override
    public Bag<T> add(T value) {
        return plus(value);
    }

    @Override
    public boolean isEmpty() {
        return map.isEmpty();
    }

    /** Maps each occurrence; results are re-collected, merging equal values' counts. */
    @Override
    public <R> Bag<R> map(Function<? super T, ? extends R> fn) {
        return fromStream(stream().map(fn));
    }

    @Override
    public <R> Bag<R> flatMap(Function<? super T, ? extends ImmutableSet<? extends R>> fn) {
        return fromStream(stream().flatMap(fn.andThen(s -> s.stream())));
    }

    @Override
    public <R> Bag<R> concatMap(Function<? super T, ? extends Iterable<? extends R>> fn) {
        return fromStream(stream().concatMap(fn));
    }

    @Override
    public <R> Bag<R> mergeMap(Function<? super T, ? extends Publisher<? extends R>> fn) {
        return fromStream(stream().mergeMap(fn));
    }

    @Override
    public <R> Bag<R> mergeMap(int maxConcurecy,
                               Function<? super T, ? extends Publisher<? extends R>> fn) {
        return fromStream(stream().mergeMap(maxConcurecy,
                                            fn));
    }

    @Override
    public <U> Bag<U> unitIterable(Iterable<U> it) {
        return fromIterable(it);
    }

    @Override
    public Bag<T> filter(Predicate<? super T> predicate) {
        return fromStream(stream().filter(predicate));
    }

    @Override
    public <R> Bag<R> unitStream(Stream<R> stream) {
        return fromStream(stream);
    }

    @Override
    public boolean containsValue(final T e) {
        return map.get(e)
                  .isPresent();
    }

    /** Returns a bag with one additional occurrence of {@code value}. */
    public Bag<T> plus(final T value) {
        return new Bag<>(map.put(value,
                                 map.get(value)
                                    .orElse(0) + 1),
                         size + 1);
    }

    @Override
    public Bag<T> plusAll(Iterable<? extends T> list) {
        Bag<T> res = this;
        for (T next : list) {
            res = res.plus(next);
        }
        return res;
    }

    /** Removes ONE occurrence per element of {@code list} (not all occurrences). */
    @Override
    public Bag<T> removeAll(Iterable<? extends T> list) {
        Bag<T> res = this;
        for (T next : list) {
            res = res.removeValue(next);
        }
        return res;
    }

    /** Removes a single occurrence of {@code value}; no-op when absent. */
    @Override
    public Bag<T> removeValue(final T value) {
        int n = map.get(value)
                   .orElse(0);
        if (n == 0) {
            return this;
        }
        if (n == 1) {
            // last occurrence: drop the map entry entirely
            return new Bag<>(map.remove(value),
                             size - 1);
        }
        return new Bag<>(map.put(value,
                                 n - 1),
                         size - 1);
    }

    /** Streams every element once per occurrence (cycles each entry by its count). */
    public ReactiveSeq<T> stream() {
        return ReactiveSeq.fromIterable(() -> map.iterator())
                          .flatMap(t -> ReactiveSeq.of(t._1())
                                                   .cycle(t._2()));
    }

    @Override
    public Iterator<T> iterator() {
        return stream().iterator();
    }

    /** Equality by element-wise iteration against ImmutableSet / PersistentBag peers. */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null) {
            return false;
        }
        if (o instanceof ImmutableSet) {
            ImmutableSet bag = (ImmutableSet) o;
            return equalToIteration(bag);
        }
        if (o instanceof PersistentBag) {
            PersistentBag bag = (PersistentBag) o;
            return equalToIteration(bag);
        }
        return false;
    }

    @Override
    public int hashCode() {
        return Objects.hash(map,
                            size);
    }

    @Override
    public String toString() {
        return map.mkString();
    }
}
|
import { LifecycleClass } from 'inferno-shared';
import { VNode } from '../core/VNodes';
// Type declarations for inferno's mounting layer: each function attaches a
// VNode of a given flavor (generic, text, void, element, children array, or
// component) to the DOM, threading lifecycle, context, and SVG mode through.
export declare function mount(vNode: VNode, parentDom: Element | null, lifecycle: LifecycleClass, context: Object, isSVG: boolean): any;
export declare function mountText(vNode: VNode, parentDom: Element | null): any;
export declare function mountVoid(vNode: VNode, parentDom: Element | null): Text;
export declare function mountElement(vNode: VNode, parentDom: Element | null, lifecycle: LifecycleClass, context: Object, isSVG: boolean): Element;
export declare function mountArrayChildren(children: any, dom: Element, lifecycle: LifecycleClass, context: Object, isSVG: boolean): void;
export declare function mountComponent(vNode: VNode, parentDom: Element | null, lifecycle: LifecycleClass, context: Object, isSVG: boolean, isClass: boolean): any;
// Lifecycle-callback helpers for class and functional components, and ref wiring.
export declare function mountClassComponentCallbacks(vNode: VNode, ref: any, instance: any, lifecycle: LifecycleClass): void;
export declare function mountFunctionalComponentCallbacks(ref: any, dom: any, lifecycle: LifecycleClass): void;
export declare function mountRef(dom: Element, value: any, lifecycle: LifecycleClass): void;
|
#!/usr/bin/env bash
# Health check: HEAD /handshakes on the local service must return HTTP 200.
# The response headers are saved to /tmp/head for diagnosis on failure.
code=`/bin/curl --head --silent --show-error --write-out '%{http_code}' --output /tmp/head http://127.0.0.1:8080/handshakes`
# On non-200: report the status code and dump the captured headers.
# (Fix: was `echo /tmp/head`, which printed the literal path instead of the
# saved response — `cat` shows the actual headers.)
[[ $code -eq 200 ]] || { echo "execution failed $code"; cat /tmp/head; exit 1; }
<filename>dist/bin/app.js
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.App = void 0;
const commander = require("commander");
const axios_1 = require("axios");
const Image = require('ascii-art-image');
// CLI application (compiled TS output): repeatedly picks a random public API
// from api-list.json and streams each response field to stdout; occasionally
// renders an ASCII-art cat first.
class App {
    constructor() {
        this.program = commander;
        this.package = require('../../package.json');
        this.apiList = require('../../api-list.json');
    }
    // Wires up commander flags, then starts the (self-recursing) fetch loop.
    initialize() {
        this.program
            .version(this.package.version)
            .option('-m, --message [value]', 'Say hello!')
            .parse(process.argv);
        this.getData();
    }
    // Picks a random API + endpoint. With 5% probability, fetches a random cat
    // image, renders it as ASCII art with a spinner, then calls the API after
    // a 3s delay; otherwise calls the API immediately.
    getData() {
        return __awaiter(this, void 0, void 0, function* () {
            const i = Math.floor(Math.random() * this.apiList.length);
            const ii = Math.floor(Math.random() * this.apiList[i].endPoints.length);
            const oneToOneHundred = Math.floor(Math.random() * 100 + 1);
            const api = this.apiList[i].api + this.apiList[i].endPoints[ii];
            if (oneToOneHundred > 95) {
                let complete = false;
                const waitSymbols = ['︷', '︵', '︹', '︺', '︶', '︸', '︶', '︺', '︹', '︵'];
                // NOTE(review): this `i` shadows the API index above (block-scoped, harmless here)
                let i = 0;
                process.stdout.write('\n');
                // spinner runs until the image callback sets `complete`
                const timer = setInterval(() => {
                    process.stdout.write(waitSymbols[i]);
                    i = ++i < waitSymbols.length ? i : 0;
                    if (complete) {
                        clearInterval(timer);
                        setTimeout(() => {
                            this.callApi(api);
                        }, 3000);
                    }
                }, 100);
                try {
                    const response = yield axios_1.default.get('https://thatcopy.pw/catapi/rest/');
                    const image = new Image({
                        filepath: response.data.url,
                        alphabet: 'variant4',
                    });
                    image.write((err, rendered) => {
                        if (err) {
                            return console.error(err.message || err);
                        }
                        complete = true;
                        process.stdout.write('\n' + rendered);
                    });
                }
                catch (error) {
                    console.error(error.message || error);
                }
            }
            else {
                this.callApi(api);
            }
        });
    }
    // GETs the API, writes each top-level response field, then loops via getData().
    callApi(api) {
        axios_1.default.get(api)
            .then((data) => {
            Object.keys(data.data).forEach((key) => {
                process.stdout.write(`${key}: ${data.data[key]}`);
            });
            this.getData();
        })
            .catch((err) => {
            console.log(err.message || err);
        });
    }
}
exports.App = App;
// Side effect on require: immediately instantiate and run the CLI.
const app = new App();
app.initialize();
|
import React, { Component } from "react";
// Controlled form capturing a first and last name in component state.
class Form extends Component {
  // Single source of truth for both text inputs.
  state = {
    first_name: "",
    last_name: ""
  };

  // Generic change handler: the input's `name` attribute doubles as the
  // state key, so one handler serves every field.
  handleChange = e => {
    const { name, value } = e.target;
    this.setState({
      [name]: value
    });
  };

  render() {
    return (
      <form>
        <input
          type="text"
          name="first_name"
          value={this.state.first_name}
          onChange={this.handleChange}
        />
        <input
          type="text"
          name="last_name"
          value={this.state.last_name}
          onChange={this.handleChange}
        />
      </form>
    );
  }
};
export default Form; |
-- Create the template-parser database and select it.
-- (Fix: the original three statements had no terminators, so the script could
-- not be executed as-is by standard SQL clients.)
CREATE DATABASE parsetmpl_db;

USE parsetmpl_db;

-- One row per service feature: identity, description, and its REST endpoint info.
CREATE TABLE template (
    service_name varchar(30) not null,
    feature_name varchar(30) not null,
    feature_id varchar(30) not null,
    feature_descr varchar(60) not null,
    endpoints_path varchar(60) not null,
    endpoints_methods varchar(60) not null
);
#! /bin/ksh
# Hook run on every invocation for the ParFlow component; currently only
# traces entry/exit.
always_pfl(){
route "${cblue}>> always_pfl${cnormal}"
route "${cblue}<< always_pfl${cnormal}"
}
# Clone ParFlow 3.7, assemble its CMake flags (OASIS coupling, NetCDF, Hypre,
# Silo, SLURM; plus CUDA + RAPIDS Memory Manager for GPU builds), then
# delegate to the machine-specific c_configure_pfl.
configure_pfl(){
route "${cblue}>> configure_pfl${cnormal}"
# install/build locations under $pfldir
export PARFLOW_INS="$pfldir/bin"
export PARFLOW_BLD="$pfldir/build"
# export PFV="oas-gpu"
export RMM_ROOT=$pfldir/rmm
#
C_FLAGS="-fopenmp -Wall -Werror"
flagsSim=" -DMPIEXEC_EXECUTABLE=$(which srun)"
# AMPS layer: OASIS3 when coupled, plain MPI otherwise
if [[ $withOAS == "true" ]]; then
flagsSim+=" -DPARFLOW_AMPS_LAYER=oas3"
else
flagsSim+=" -DPARFLOW_AMPS_LAYER=mpi1"
fi
flagsSim+=" -DOAS3_ROOT=$oasdir/$platform"
flagsSim+=" -DSILO_ROOT=$EBROOTSILO"
flagsSim+=" -DHYPRE_ROOT=$EBROOTHYPRE"
flagsSim+=" -DCMAKE_C_FLAGS=$C_FLAGS"
flagsSim+=" -DCMAKE_BUILD_TYPE=Release"
flagsSim+=" -DPARFLOW_ENABLE_TIMING=TRUE"
flagsSim+=" -DCMAKE_INSTALL_PREFIX=$PARFLOW_INS"
flagsSim+=" -DNETCDF_DIR=$ncdfPath"
flagsSim+=" -DNETCDF_Fortran_ROOT=$ncdfPath"
flagsSim+=" -DTCL_TCLSH=$tclPath/bin/tclsh8.6"
flagsSim+=" -DPARFLOW_AMPS_SEQUENTIAL_IO=on"
flagsSim+=" -DPARFLOW_ENABLE_SLURM=TRUE"
# MPI compiler wrappers used by the build
pcc="$mpiPath/bin/mpicc"
pfc="$mpiPath/bin/mpif90"
pf77="$mpiPath/bin/mpif77"
pcxx="$mpiPath/bin/mpic++"
# wipe any previous checkout before cloning
if [ -d ${rootdir}/${mList[3]} ] ; then
comment " remove ${mList[3]}"
rm -rf ${rootdir}/${mList[3]} $pfldir >> $log_file 2>> $err_file
check
fi
comment " git clone parflow3_7 "
cd $rootdir
# git clone https://github.com/hokkanen/parflow.git >> $log_file 2>> $err_file
git clone https://github.com/parflow/parflow.git >> $log_file 2>> $err_file
check
mv parflow parflow3_7
cp -rf ${rootdir}/${mList[3]} $pfldir >> $log_file 2>> $err_file
mkdir -p $PARFLOW_INS
mkdir -p $PARFLOW_BLD
cd $pfldir
check
# comment " git checkout to $PFV \n"
# git checkout ${PFV} >> $log_file 2>> $err_file
# check
comment " parflow is configured for $processor "
check
# GPU builds additionally need CUDA modules and a local RMM build
if [[ $processor == "GPU" ]]; then
comment "module load CUDA mpi-settings/CUDA "
module load CUDA mpi-settings/CUDA >> $log_file 2>> $err_file
check
comment " additional configuration options for GPU are set "
flagsSim+=" -DPARFLOW_ACCELERATOR_BACKEND=cuda"
flagsSim+=" -DRMM_ROOT=$RMM_ROOT"
flagsSim+=" -DCMAKE_CUDA_RUNTIME_LIBRARY=Shared"
check
comment " git clone RAPIDS Memory Manager "
if [ -d $RMM_ROOT ] ; then
comment " remove $RMM_ROOT "
rm -rf $RMM_ROOT >> $log_file 2>> $err_file
check
fi
git clone -b branch-0.10 --single-branch --recurse-submodules https://github.com/hokkanen/rmm.git >> $log_file 2>> $err_file
check
mkdir -p $RMM_ROOT/build
cd $RMM_ROOT/build
comment " configure RMM: RAPIDS Memory Manager "
cmake ../ -DCMAKE_INSTALL_PREFIX=$RMM_ROOT >> $log_file 2>> $err_file
check
comment " make RMM "
make -j >> $log_file 2>> $err_file
check
comment " make install RMM "
make install >> $log_file 2>> $err_file
check
fi
# hand off to the platform-specific configure step
c_configure_pfl
route "${cblue}<< configure_pfl${cnormal}"
}
# Build step: delegates to the platform-specific c_make_pfl.
make_pfl(){
route "${cblue}>> make_pfl${cnormal}"
c_make_pfl
route "${cblue}<< make_pfl${cnormal}"
}
# Source-substitution step: intentionally empty for ParFlow (trace only).
substitutions_pfl(){
route "${cblue}>> substitutions_pfl${cnormal}"
route "${cblue}<< substitutions_pfl${cnormal}"
}
# Run-setup step: delegates to the platform-specific c_setup_pfl.
setup_pfl(){
route "${cblue}>> setup_pfl${cnormal}"
c_setup_pfl
route "${cblue}<< setup_pfl${cnormal}"
}
|
#!/bin/bash
# Creates a "Marketing" business service via the tackle REST API and
# pretty-prints the JSON response. Override the target with HOST=host:port.
host="${HOST:-localhost:8080}"
# NOTE(review): "Description" is capitalized unlike the other keys
# (createUser, name, owner) — confirm the API's expected field name.
curl -X POST ${host}/businessservices -d \
'{
    "createUser": "tackle",
    "name": "Marketing",
    "Description": "Marketing Dept.",
    "owner": {
        "id": 1
    }
}' | jq -M .
|
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
# Copyright 2020 Authors of Arktos - file modified.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
#
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
  # optional args: $1 = reserved public IP for the master, $2 = private network IP
  local address=""
  local private_network_ip=""
  [[ -n ${1:-} ]] && address="${1}"
  [[ -n ${2:-} ]] && private_network_ip="${2}"

  # materialize kube-env and GCI metadata files, then create the VM
  write-master-env
  ensure-gci-metadata-files
  create-master-instance-internal "${MASTER_NAME}" "${address}" "${private_network_ip}"
}
# Creates an additional master replica by copying the existing master's
# metadata, rewriting its kube-env for an 'existing' etcd cluster, and minting
# fresh etcd peer/apiserver certificates for the replica.
# Args: $1 = existing master zone, $2 = existing master name,
#       $3 = comma-separated list of current etcd cluster members.
function replicate-master-instance() {
  local existing_master_zone="${1}"
  local existing_master_name="${2}"
  local existing_master_replicas="${3}"

  local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
  # Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
  kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
  kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"

  # Substitute INITIAL_ETCD_CLUSTER_STATE
  kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER_STATE")"
  kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER_STATE: 'existing'")"

  # Extract the etcd CA from the existing kube-env and sign new peer certs for
  # this replica, then splice them into the replica's kube-env.
  ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
  ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
  create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
  kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
  kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"

  # Same for the etcd-apiserver CA: new server and client certs for the replica.
  ETCD_APISERVER_CA_KEY="$(echo "${kube_env}" | grep "ETCD_APISERVER_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
  ETCD_APISERVER_CA_CERT="$(echo "${kube_env}" | grep "ETCD_APISERVER_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
  create-etcd-apiserver-certs "etcd-${REPLICA_NAME}" "${REPLICA_NAME}" "${ETCD_APISERVER_CA_CERT}" "${ETCD_APISERVER_CA_KEY}"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_SERVER_KEY")"
  kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_SERVER_KEY: '${ETCD_APISERVER_SERVER_KEY_BASE64}'")"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_SERVER_CERT")"
  kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_SERVER_CERT: '${ETCD_APISERVER_SERVER_CERT_BASE64}'")"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_CLIENT_KEY")"
  kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_CLIENT_KEY: '${ETCD_APISERVER_CLIENT_KEY_BASE64}'")"
  kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_CLIENT_CERT")"
  kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_CLIENT_CERT: '${ETCD_APISERVER_CLIENT_CERT_BASE64}'")"

  echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
  # Copy the remaining metadata attributes verbatim from the existing master.
  get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
  get-metadata "${existing_master_zone}" "${existing_master_name}" gci-update-strategy > "${KUBE_TEMP}/gci-update.txt"
  get-metadata "${existing_master_zone}" "${existing_master_name}" gci-ensure-gke-docker > "${KUBE_TEMP}/gci-ensure-gke-docker.txt"
  get-metadata "${existing_master_zone}" "${existing_master_name}" gci-docker-version > "${KUBE_TEMP}/gci-docker-version.txt"
  get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml"
  get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-location > "${KUBE_TEMP}/cluster-location.txt"
  get-metadata "${existing_master_zone}" "${existing_master_name}" controllerconfig > "${KUBE_TEMP}/controllerconfig.json"
  get-metadata "${existing_master_zone}" "${existing_master_name}" networktemplate > "${KUBE_TEMP}/network.tmpl"

  create-master-instance-internal "${REPLICA_NAME}"
}
# Creates the actual GCE master VM with retries.
# Args: $1 = instance name, $2 = reserved address (optional),
#       $3 = private network IP (optional).
# Fix: the local variable was consistently misspelled `private_netwrok_ip`;
# renamed to `private_network_ip` (declaration and its single use — both
# confined to this function, so the rename is safe).
function create-master-instance-internal() {
  local gcloud="gcloud"
  local retries=5
  local sleep_sec=10
  if [[ "${MASTER_SIZE##*-}" -ge 64 ]]; then  # remove everything up to last dash (inclusive)
    # Workaround for #55777
    retries=30
    sleep_sec=60
  fi

  local -r master_name="${1}"
  local -r address="${2:-}"
  local -r private_network_ip="${3:-}"

  local preemptible_master=""
  if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
    preemptible_master="--preemptible --maintenance-policy TERMINATE"
  fi

  local enable_ip_aliases
  if [[ "${NODE_IPAM_MODE:-}" == "CloudAllocator" ]]; then
    enable_ip_aliases=true
  else
    enable_ip_aliases=false
  fi

  local network=$(make-gcloud-network-argument \
    "${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
    "${address:-}" "${private_network_ip:-}" "${enable_ip_aliases:-}" "${IP_ALIAS_SIZE:-}")

  echo "DBG: MASTER_EXTRA_METADATA : ${MASTER_EXTRA_METADATA}"

  # Assemble the instance metadata: kube-env, kubelet config, startup scripts,
  # cluster identity, GCI settings, certs, and controller/network templates.
  local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
  metadata="${metadata},kubelet-config=${KUBE_TEMP}/master-kubelet-config.yaml"
  metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml"
  metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh"
  metadata="${metadata},apiserver-config=${KUBE_ROOT}/hack/apiserver.config"
  metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
  metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
  metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt"
  metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt"
  metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
  metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml"
  metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt"
  metadata="${metadata},controllerconfig=${KUBE_TEMP}/controllerconfig.json"
  if [[ -s ${KUBE_TEMP}/network.tmpl ]]; then
    metadata="${metadata},networktemplate=${KUBE_TEMP}/network.tmpl"
  fi
  metadata="${metadata},${MASTER_EXTRA_METADATA}"

  # Attach the persistent master data disk.
  local disk="name=${master_name}-pd"
  disk="${disk},device-name=master-pd"
  disk="${disk},mode=rw"
  disk="${disk},boot=no"
  disk="${disk},auto-delete=no"

  # Retry loop: only "try again later" errors are retried.
  for attempt in $(seq 1 ${retries}); do
    if result=$(${gcloud} compute instances create "${master_name}" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --machine-type "${MASTER_SIZE}" \
      --image-project="${MASTER_IMAGE_PROJECT}" \
      --image "${MASTER_IMAGE}" \
      --tags "${MASTER_TAG}" \
      --scopes "storage-ro,compute-rw,monitoring,logging-write" \
      --metadata-from-file "${metadata}" \
      --disk "${disk}" \
      --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
      ${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \
      ${preemptible_master} \
      ${network} 2>&1); then
      echo "${result}" >&2
      return 0
    else
      echo "${result}" >&2
      if [[ ! "${result}" =~ "try again later" ]]; then
        echo "Failed to create master instance due to non-retryable error" >&2
        return 1
      fi
      sleep $sleep_sec
    fi
  done
  echo "Failed to create master instance despite ${retries} attempts" >&2
  return 1
}
# get-metadata <zone> <instance> <key>
# Reads one instance metadata attribute by SSHing into the VM and querying the
# internal GCE metadata server from there (stderr suppressed; prints the value).
function get-metadata() {
  local zone="${1}"
  local name="${2}"
  local key="${3}"
  gcloud compute ssh "${name}" \
    --project "${PROJECT}" \
    --zone "${zone}" \
    --command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
}
|
<reponame>kaomte/rails_typed_settings
# Migration for the typed-settings store: each row holds one named setting;
# var_type records how transformed_value/default_transformed_value should be
# deserialized back into a Ruby value.
class CreateSettings < ActiveRecord::Migration
  def change
    create_table :settings do |t|
      t.string :var_name, null: false
      t.string :var_type, null: false
      t.text :description
      t.text :transformed_value
      t.text :default_transformed_value
      t.timestamps null: false
    end
    # settings are looked up (and must be unique) by name
    add_index :settings, %i(var_name), unique: true
  end
end
|
/**
 * Computes and prints the greatest common divisor of two integers.
 */
public class GCD {

    /**
     * Returns the greatest common divisor of two non-negative integers using
     * the Euclidean algorithm — O(log min(a, b)) instead of the original
     * O(min(a, b)) trial-division loop, and correct for a or b equal to 0
     * (gcd(x, 0) == x).
     *
     * @param a first non-negative integer
     * @param b second non-negative integer
     * @return the greatest common divisor of a and b
     */
    static int gcd(int a, int b) {
        while (b != 0) {
            int t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    public static void main(String[] args) {
        int num1 = 86;
        int num2 = 12;
        int gcd = gcd(num1, num2);
        // same output format as before: "GCD of 86 and 12 is: 2"
        System.out.printf("GCD of %d and %d is: %d", num1, num2, gcd);
    }
}
#!/bin/bash
# Trains a BERT-based abstractive summarizer on CNN/DM, then runs validation.
# Active commands are the first `train` line and the final `validate` line;
# the commented variants preserve earlier experiment settings.
gpu='1,5'
data_dir='/home/bqw/nlp_data/cnndm/graph_bert/cnndm'
model_dir='../models/abs_gate_cnn'
result_dir='../logs/abs_gate_cnn'
ip=5515
log_dir='../logs/abs_gate_cnn'
train_step=140000
test_log='../logs/abs_gate_valid_cnn'
copy=False
lr_dec=0.2
warm_dec=10000
grad_norm=0
gate=True
max_pos=512
max_tgt_len=200
python train.py -task abs -mode train -bert_data_path ${data_dir} -dec_dropout 0.2 -model_path ${model_dir} -sep_optim true -lr_bert 0.002 -lr_dec ${lr_dec} -save_checkpoint_steps 2000 -batch_size 280 -train_steps ${train_step} -report_every 50 -accum_count 5 -use_bert_emb true -use_interval true -warmup_steps_bert 20000 -warmup_steps_dec ${warm_dec} -max_pos 512 -visible_gpus ${gpu} -log_file ${log_dir} -copy ${copy} -init_method tcp://localhost:${ip}
#python train.py -task abs -mode train -bert_data_path ${data_dir} -dec_dropout 0.2 -model_path ${model_dir} -sep_optim true -lr_bert 0.002 -lr_dec ${lr_dec} -save_checkpoint_steps 2000 -batch_size 280 -train_steps ${train_step} -report_every 50 -accum_count 5 -use_bert_emb true -use_interval true -warmup_steps_bert 20000 -warmup_steps_dec ${warm_dec} -max_pos 512 -visible_gpus ${gpu} -log_file ${log_dir} -copy ${copy} -max_grad_norm ${grad_norm} -init_method tcp://localhost:${ip}
#python train.py -task abs -mode train -bert_data_path ${data_dir} -dec_dropout 0.2 -model_path ${model_dir} -sep_optim true -lr_bert 0.002 -lr_dec ${lr_dec} -save_checkpoint_steps 2000 -batch_size 280 -train_steps ${train_step} -report_every 50 -accum_count 5 -use_bert_emb true -use_interval true -warmup_steps_bert 20000 -warmup_steps_dec ${warm_dec} -max_pos ${max_pos} -visible_gpus ${gpu1},${gpu2} -log_file ${log_dir} -copy ${copy} -max_grad_norm ${grad_norm} -init_method tcp://localhost:${ip} -max_tgt_len ${max_tgt_len}
# NOTE(review): ${gpu1} below is never defined in this script (only `gpu` is),
# so `-visible_gpus` expands to empty — probably meant ${gpu} or a single GPU
# id; confirm intended device before relying on the validate run.
python train.py -task abs -mode validate -batch_size 3000 -test_batch_size 500 -bert_data_path ${data_dir} -log_file ${test_log} -sep_optim true -use_interval true -visible_gpus ${gpu1} -max_pos ${max_pos} -max_length 200 -alpha 0.95 -min_length 50 -result_path ${result_dir} -model_path ${model_dir} -test_all True -copy ${copy}
|
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import ssl
class VSphereManager:
def __init__(self, server, username, password, skip_verification=False):
self.server = server
self.username = username
self.password = password
self.skip_verification = skip_verification
self.vsphere_client = None
def authenticate(self):
if self.skip_verification:
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_NONE
session = SmartConnect(host=self.server, user=self.username, pwd=self.password, sslContext=context)
else:
session = SmartConnect(host=self.server, user=self.username, pwd=self.password)
self.vsphere_client = session
def get_vm_id(self, vm_name):
content = self.vsphere_client.RetrieveContent()
container = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
for vm in container.view:
if vm.name == vm_name:
return vm._moId
return None
def get_resource_pool_id(self, datacenter_name, resource_pool_name):
content = self.vsphere_client.RetrieveContent()
datacenter = None
for dc in content.rootFolder.childEntity:
if isinstance(dc, vim.Datacenter) and dc.name == datacenter_name:
datacenter = dc
break
if datacenter:
resource_pool = datacenter.hostFolder.childEntity[0].resourcePool
while resource_pool:
if resource_pool.name == resource_pool_name:
return resource_pool._moId
resource_pool = resource_pool.resourcePool
return None
def execute_action_on_vm(self, vm_name, action):
vm_id = self.get_vm_id(vm_name)
if vm_id:
# Implement the specific action to be executed on the VM
# Example: Power on the VM
vm = self.vsphere_client.content.searchIndex.FindByUuid(None, vm_id, True, False)
if action == "power_on":
vm.PowerOnVM_Task()
elif action == "power_off":
vm.PowerOffVM_Task()
# Add more actions as needed
else:
print(f"VM with name '{vm_name}' not found.")
def disconnect(self):
if self.vsphere_client:
Disconnect(self.vsphere_client) |
#!/usr/bin/env bash
# sudo loop
sudo -v
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# setup compilers first
sudo port -N install gcc11 mpich-default
# port select --list gcc
sudo port -N select --set gcc mp-gcc11
# port select --list mpi
sudo port -N select --set mpi mpich-mp-fortran
grep -v '#' port.txt | xargs sudo port -N install
# configure git after macports's git is installed
git config --global pull.rebase false
sudo port -N load smartmontools
# sudo port -N load openssh
sudo port -N load rsync
|
<reponame>vfdev-5/polyaxon
import * as _ from 'lodash';
import * as React from 'react';
import * as actions from '../../actions/project';
import { getProjectUrl, getUserUrl } from '../../constants/utils';
import ActivityLogs from '../../containers/activityLogs';
import Builds from '../../containers/builds';
import Experiments from '../../containers/experiments';
import Groups from '../../containers/groups';
import Jobs from '../../containers/jobs';
import { BookmarkInterface } from '../../interfaces/bookmarks';
import { ProjectModel } from '../../models/project';
import { getBookmark } from '../../utils/bookmarks';
import Breadcrumb from '../breadcrumb';
import { EmptyList } from '../empty/emptyList';
import ProjectInstructions from '../instructions/projectInstructions';
import LinkedTab from '../linkedTab';
import ProjectActions from './projectActions';
import ProjectOverview from './projectOverview';
export interface Props {
project: ProjectModel;
onUpdate: (updateDict: { [key: string]: any }) => actions.ProjectAction;
onDelete: () => actions.ProjectAction;
fetchData: () => actions.ProjectAction;
bookmark: () => actions.ProjectAction;
unbookmark: () => actions.ProjectAction;
startNotebook: () => actions.ProjectAction;
stopNotebook: () => actions.ProjectAction;
startTensorboard: () => actions.ProjectAction;
stopTensorboard: () => actions.ProjectAction;
}
export default class ProjectDetail extends React.Component<Props, {}> {
public componentDidMount() {
this.props.fetchData();
}
public render() {
const project = this.props.project;
if (_.isNil(project)) {
return EmptyList(false, 'project', 'project');
}
const bookmark: BookmarkInterface = getBookmark(
this.props.project.bookmarked, this.props.bookmark, this.props.unbookmark);
const projectUrl = getProjectUrl(project.user, project.name);
return (
<div className="row">
<div className="col-md-12">
<Breadcrumb
icon="fa-server"
links={[
{name: project.user, value: getUserUrl(project.user)},
{name: project.name}]}
bookmark={bookmark}
actions={
<ProjectActions
onDelete={this.props.onDelete}
notebookActionCallback={
project.has_tensorboard ? this.props.stopNotebook : this.props.startNotebook}
tensorboardActionCallback={
project.has_tensorboard ? this.props.stopTensorboard : this.props.startTensorboard}
hasNotebook={project.has_notebook}
hasTensorboard={project.has_tensorboard}
pullRight={true}
/>
}
/>
<LinkedTab
baseUrl={projectUrl}
tabs={[
{
title: 'Overview',
component: <ProjectOverview
project={project}
onUpdate={this.props.onUpdate}
/>,
relUrl: ''
}, {
title: 'Experiments',
component: <Experiments
user={project.user}
projectName={project.unique_name}
showBookmarks={true}
useCheckbox={true}
useFilters={true}
/>,
relUrl: 'experiments'
}, {
title: 'Experiment groups',
component: <Groups
user={project.user}
projectName={project.unique_name}
showBookmarks={true}
useFilters={true}
/>,
relUrl: 'groups'
}, {
title: 'Jobs',
component: <Jobs
user={project.user}
projectName={project.unique_name}
showBookmarks={true}
useFilters={true}
/>,
relUrl: 'jobs'
}, {
title: 'Builds',
component: <Builds
user={project.user}
projectName={project.unique_name}
showBookmarks={true}
useFilters={true}
/>,
relUrl: 'builds'
}, {
title: 'Activity logs',
component: <ActivityLogs user={project.user} projectName={project.name}/>,
relUrl: 'activitylogs'
}, {
title: 'Instructions',
component: <ProjectInstructions projectName={project.unique_name}/>,
relUrl: 'instructions'
}
]}
/>
</div>
</div>
);
}
}
|
# Sets reasonable macOS defaults.
#
# Or, in other words, set shit how I like in macOS.
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.macos
#
# Run ./set-defaults.sh and you'll be good to go.
# Disable press-and-hold for keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Always open everything in Finder's list view. This is important.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder.
chflags nohidden ~/Library
# Set a really fast key repeat.
# defaults write NSGlobalDomain KeyRepeat -float 0.000000000001
defaults write NSGlobalDomain KeyRepeat -int 1
defaults write NSGlobalDomain InitialKeyRepeat -int 5
# Set the Finder prefs for showing a few different volumes on the Desktop.
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Run the screensaver if we're in the bottom-left hot corner.
defaults write com.apple.dock wvous-bl-corner -int 5
defaults write com.apple.dock wvous-bl-modifier -int 0
# Hide Safari's bookmark bar.
defaults write com.apple.Safari ShowFavoritesBar -bool false
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
|
#!/bin/bash
# texttobinary.sh
function usage {
echo "usage: texttobinary.sh [-d] [-w width] [-e endian] < in.txt > out.dat"
echo "converts comma-separated numbers on stdin to binary data on stdout"
echo "-d: debug print string of hex escapes"
echo "-w: width in bytes of binary word, default is 4"
echo "-e: endian format big|little, default is little"
echo "in.txt: numbers separated by commas or whitespace"
echo "examples:"
echo
echo "texttobinary.sh -d < <(echo -e \"9, 2, 3, -7\n-5 100 1024\")"
echo "\x09\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\xf9\xff\xff\xff\xfb\xff\xff\xff\x64\x00\x00\x00\x00\x04\x00\x00"
echo
echo "echo -e \"9, 2, 3, -7\n-5 100 1024\" | texttobinary.sh | od -An -td4 -w4 -v"
}
while getopts "de:hw:" opt; do
case $opt in
d) debug=true;;
e) endian=$OPTARG;;
h) usage; exit 0;;
w) width=$OPTARG;;
*) usage; exit 1
esac
done
shift $((OPTIND-1))
: ${width:=4} ${endian:=little} ${debug:=false}
[[ $endian != big && $endian != little ]] && usage && exit 1
let len=2*width
while IFS+=, read -a vals; do
for((i=0; i < ${#vals[*]}; ++i)); do
val=${vals[i]}
# printf output is 64-bit bigendian
# printf "0x%.2x" -7 prints 0xfffffffffffffff9
printf -vhex "%.${len}x" $val
# truncate sign extension from right for 64-bit negative numbers past maximum length eg -7 should be 0xfffffff9 for 4 bytes
if ((val < 0)); then
hex=${hex: -len}
fi
# reverse bytes for littleendian
if [[ $endian == little ]]; then
for((j=0; j < ${#hex}; j+=2)); do
str+="\\x${hex: -j-2:2}"
done
elif [[ $endian == big ]]; then
for((j=0; j < ${#hex}; j+=2)); do
str+="\\x${hex:j:2}"
done
fi
done
done
# prints hex escaped string for debugging
if $debug; then
echo "$str"
exit 0
fi
# print binary data
echo -en "$str"
|
<reponame>lananh265/social-network
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ecommerce_receipt_rublo = void 0;
var ecommerce_receipt_rublo = {
"viewBox": "0 0 64 64",
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "polygon",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "52,62.999 52,0.999 26,0.999 12,14.999 \r\n\t\t12,63 16,61 20,63 24,61 28,63 32,61 36,63 40,61 44,63 48,61 \t"
},
"children": [{
"name": "polygon",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "52,62.999 52,0.999 26,0.999 12,14.999 \r\n\t\t12,63 16,61 20,63 24,61 28,63 32,61 36,63 40,61 44,63 48,61 \t"
},
"children": []
}]
}, {
"name": "polyline",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "12,14.999 26,14.999 26,0.999 \t"
},
"children": [{
"name": "polyline",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "12,14.999 26,14.999 26,0.999 \t"
},
"children": []
}]
}]
}, {
"name": "line",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"x1": "26",
"y1": "48",
"x2": "26",
"y2": "20"
},
"children": []
}, {
"name": "path",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"d": "M26,21h5.953c0,0,10.078-0.723,10.078,8\r\n\ts-10.078,8-10.078,8H22"
},
"children": []
}, {
"name": "line",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"x1": "22",
"y1": "42",
"x2": "33",
"y2": "42"
},
"children": []
}]
};
exports.ecommerce_receipt_rublo = ecommerce_receipt_rublo; |
#!/bin/sh -e
# hello-go-deploy-marathon example-01 build.sh
echo " "
if [ "$1" = "-debug" ]
then
echo "************************************************************************"
echo "* build.sh -debug (START) **********************************************"
echo "************************************************************************"
# set -x enables a mode of the shell where all executed commands are printed to the terminal.
set -x
echo " "
else
echo "************************************************************************"
echo "* build.sh (START) *****************************************************"
echo "************************************************************************"
echo " "
fi
echo "cd to where go code is"
echo "cd .."
cd ..
echo "Build your docker image using Dockerfile"
echo "NOTE: The binary is built using this step"
echo "docker build -f build-push/Dockerfile -t jeffdecola/hello-go-deploy-marathon ."
docker build -f build-push/Dockerfile -t jeffdecola/hello-go-deploy-marathon .
echo " "
echo "Check Docker Image size"
echo "docker images jeffdecola/hello-go-deploy-marathon:latest"
docker images jeffdecola/hello-go-deploy-marathon:latest
echo " "
echo "Useful commands:"
echo " docker run --name hello-go-deploy-marathon -dit jeffdecola/hello-go-deploy-marathon"
echo " docker exec -i -t hello-go-deploy-marathon /bin/bash"
echo " docker logs hello-go-deploy-marathon"
echo " "
echo "************************************************************************"
echo "* build.sh (END) *******************************************************"
echo "************************************************************************"
echo " "
|
export const ACTIVE_CHAT = 'ACTIVE_CHAT';
export const ACTIVE_CHAT_USER = 'ACTIVE_CHAT_USER'; |
import React, {PropTypes} from 'react';
import {connect} from 'react-redux';
import {Table, Modal, Button} from 'react-bootstrap';
import {deleteProblem} from '../../actions/problemActions';
import EditProblemModal from './EditProblemModal';
import {push} from 'react-router-redux';
const initialState = {showEditModal: false, showDeleteModal: false, selectedProblem: undefined};
class ProblemTable extends React.Component {
constructor(props) {
super(props);
this.state = initialState;
this.mapProblemType = this.mapProblemType.bind(this);
this.closeDeleteModal = this.closeDeleteModal.bind(this);
this.openDeleteModal = this.openDeleteModal.bind(this);
this.onDeleteRoomClick = this.onDeleteRoomClick.bind(this);
this.openEditModal = this.openEditModal.bind(this);
this.closeEditModal = this.closeEditModal.bind(this);
}
openDeleteModal(id) {
this.setState({showDeleteModal:true, selectedProblem:id});
}
closeDeleteModal() {
this.setState({showDeleteModal:false});
}
openEditModal(id) {
this.setState({selectedProblem:id, showEditModal: true});
}
closeEditModal() {
this.setState({showEditModal: false});
}
onDeleteRoomClick() {
this.props.deleteProblem(this.state.selectedProblem);
this.closeDeleteModal();
}
mapProblemType(type) {
switch (type) {
case "choice":
return "选择题";
case "blank":
return "填空题";
case "answer":
return "简答题";
case "code":
return "编程题";
default:
return "未知类型";
}
}
render() {
return (
<div>
<Table>
<tbody>
{
this.props.isWaiting ?
<tr>
<td>正在读取...</td>
</tr>
:
this.props.problems.length > 0 ?
this.props.problems.map(problem =>
<tr key={problem.id}>
<td>
<a className="room-name">{problem.content.title}</a>
</td>
<td>
{this.mapProblemType(problem.type)}
</td>
<td className="aln-right">
<a className="tb-link" onClick={() => {this.openEditModal(problem.id);}}>编辑</a> | <a
className="tb-link" onClick={() => {this.openDeleteModal(problem.id);}}>删除</a>
</td>
</tr>)
:
<tr>
<td>暂无题目</td>
</tr> }
</tbody>
</Table>
<Modal show={this.state.showDeleteModal} onHide={this.closeDeleteModal}>
<Modal.Header closeButton>
<Modal.Title>确认删除题目?</Modal.Title>
</Modal.Header>
<Modal.Footer>
<Button onClick={this.closeDeleteModal}>取消</Button>
<Button bsStyle="primary" onClick={this.onDeleteRoomClick}>确认</Button>
</Modal.Footer>
</Modal>
<EditProblemModal show={this.state.showEditModal} onHide={this.closeEditModal} type="choice"
selectedProblem={this.state.selectedProblem}/>
</div>
);
}
}
ProblemTable.propTypes = {
problems: PropTypes.arrayOf(PropTypes.object).isRequired,
isWaiting: PropTypes.bool.isRequired,
deleteProblem: PropTypes.func.isRequired
};
function mapStateToProps(state) {
return {
problems: state.problemStates.problems,
isWaiting: state.problemStates.isWaiting
};
}
export default connect(mapStateToProps, {deleteProblem})(ProblemTable);
|
<filename>tests/android/browser.spec.ts
/**
* Copyright 2020 Microsoft Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net from 'net';
import { androidTest as test, expect } from './androidTest';
test('androidDevice.model', async function({ androidDevice }) {
expect(androidDevice.model()).toBe('sdk_gphone64_x86_64');
});
test('androidDevice.launchBrowser', async function({ androidDevice }) {
const context = await androidDevice.launchBrowser();
const [page] = context.pages();
await page.goto('data:text/html,<title>Hello world!</title>');
expect(await page.title()).toBe('Hello world!');
await context.close();
});
test('should create new page', async function({ androidDevice }) {
const context = await androidDevice.launchBrowser();
const page = await context.newPage();
await page.goto('data:text/html,<title>Hello world!</title>');
expect(await page.title()).toBe('Hello world!');
await page.close();
await context.close();
});
test('should check', async function({ androidDevice }) {
const context = await androidDevice.launchBrowser();
const [page] = context.pages();
await page.setContent(`<input id='checkbox' type='checkbox'></input>`);
await page.check('input');
expect(await page.evaluate(() => window['checkbox'].checked)).toBe(true);
await page.close();
await context.close();
});
test('should be able to send CDP messages', async ({ androidDevice }) => {
const context = await androidDevice.launchBrowser();
const [page] = context.pages();
const client = await context.newCDPSession(page);
await client.send('Runtime.enable');
const evalResponse = await client.send('Runtime.evaluate', { expression: '1 + 2', returnByValue: true });
expect(evalResponse.result.value).toBe(3);
});
test('should be able to use a custom port', async function({ playwright }) {
const proxyPort = 5038;
let countOfIncomingConnections = 0;
let countOfConnections = 0;
const server = net.createServer(socket => {
++countOfIncomingConnections;
++countOfConnections;
socket.on('close', () => countOfConnections--);
const client = net.connect(5037);
socket.pipe(client).pipe(socket);
});
await new Promise<void>(resolve => server.listen(proxyPort, resolve));
const devices = await playwright._android.devices({ port: proxyPort });
expect(countOfIncomingConnections).toBeGreaterThanOrEqual(1);
expect(devices).toHaveLength(1);
const device = devices[0];
const value = await device.shell('echo foobar');
expect(value.toString()).toBe('foobar\n');
await device.close();
await new Promise(resolve => server.close(resolve));
expect(countOfIncomingConnections).toBeGreaterThanOrEqual(1);
expect(countOfConnections).toBe(0);
});
test('should be able to pass context options', async ({ androidDevice, httpsServer }) => {
const context = await androidDevice.launchBrowser({
colorScheme: 'dark',
geolocation: { longitude: 10, latitude: 10 },
permissions: ['geolocation'],
ignoreHTTPSErrors: true,
baseURL: httpsServer.PREFIX,
});
const [page] = context.pages();
await page.goto('./empty.html');
expect(page.url()).toBe(httpsServer.PREFIX + '/empty.html');
expect(await page.evaluate(() => new Promise(resolve => navigator.geolocation.getCurrentPosition(position => {
resolve({ latitude: position.coords.latitude, longitude: position.coords.longitude });
})))).toEqual({ latitude: 10, longitude: 10 });
expect(await page.evaluate(() => matchMedia('(prefers-color-scheme: dark)').matches)).toBe(true);
expect(await page.evaluate(() => matchMedia('(prefers-color-scheme: light)').matches)).toBe(false);
});
|
package io.quarkus.micrometer.deployment.binder;
import java.util.function.BooleanSupplier;
import io.quarkus.arc.deployment.UnremovableBeanBuildItem;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.nativeimage.NativeImageResourceBuildItem;
import io.quarkus.deployment.builditem.nativeimage.ReflectiveClassBuildItem;
import io.quarkus.micrometer.deployment.MicrometerProcessor;
import io.quarkus.micrometer.runtime.MicrometerRecorder;
import io.quarkus.micrometer.runtime.config.MicrometerConfig;
public class RestClientProcessor {
// Avoid referencing optional dependencies
// Rest client listener SPI
private static final String REST_CLIENT_LISTENER_CLASS_NAME = "org.eclipse.microprofile.rest.client.spi.RestClientListener";
private static final Class<?> REST_CLIENT_LISTENER_CLASS = MicrometerRecorder
.getClassForName(REST_CLIENT_LISTENER_CLASS_NAME);
// Rest Client listener
private static final String REST_CLIENT_METRICS_LISTENER = "io.quarkus.micrometer.runtime.binder.RestClientMetrics";
// Http Client runtime config (injected programmatically)
private static final String REST_CLIENT_HTTP_CONFIG = "io.quarkus.micrometer.runtime.config.runtime.HttpClientConfig";
static class RestClientEnabled implements BooleanSupplier {
MicrometerConfig mConfig;
public boolean getAsBoolean() {
return REST_CLIENT_LISTENER_CLASS != null && mConfig.checkBinderEnabledWithDefault(mConfig.binder.httpClient);
}
}
@BuildStep(onlyIf = { RestClientEnabled.class, MicrometerProcessor.HttpClientBinderEnabled.class })
UnremovableBeanBuildItem registerRestClientListener(BuildProducer<NativeImageResourceBuildItem> resource,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass) {
resource.produce(new NativeImageResourceBuildItem(
"META-INF/services/org.eclipse.microprofile.rest.client.spi.RestClientListener"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, REST_CLIENT_METRICS_LISTENER));
return new UnremovableBeanBuildItem(new UnremovableBeanBuildItem.BeanClassNameExclusion(REST_CLIENT_HTTP_CONFIG));
}
}
|
#!/bin/sh
# customfield_12000 = sector
# customfield_11909 = segment type
# customfield_11905 = Part Number
# customfield_11907 = Original Partner Blank Allocation
# customfield_11906 = Item Location (India, Canon, ...)
# customfield_11912 = Risk of Loss
# components = Components (Planned, In-Work Blank, Accepted Blank, In-Work Roundel, Acceptance View Roundel)
# status = Status (TO DO, In Progress, ...) // JIRA Status
# summary = title (includes segment id: ex: "M1 Segment SN-072")
# Segment no: 1 to 574
# Note: Need to get a list of all JIRA tasks, since some blanks were discarded and new issues created!
segmentNumber=340
#curl \
# -D- \
# -u ${JIRA_USER}:${JIRA_API_TOKEN} \
# -X GET \
# -H "Content-Type: application/json" \
# https://tmt-project.atlassian.net/rest/api/latest/issue/M1ST-${segmentNumber}?fields=customfield_12000,customfield_11909,customfield_11905,customfield_11907,customfield_11906,customfield_11912,components,status,summary
curl \
-D- \
-u ${JIRA_USER}:${JIRA_API_TOKEN} \
-X GET \
-H "Content-Type: application/json" \
https://tmt-project.atlassian.net/rest/api/latest/issue/M1ST-${segmentNumber}
|
package blurhash
import "errors"
var (
// ErrIncorrectComponents blurHash must have between 1 and 9 components
ErrIncorrectComponents = errors.New("blurHash must have between 1 and 9 components")
// ErrWrongSize width and height must match the pixels array
ErrWrongSize = errors.New("width and height must match the pixels array")
// ErrWrongBlurhashLen the blurhash string must be at least 6 characters
ErrWrongBlurhashLen = errors.New("the blurhash string must be at least 6 characters")
// ErrBlurhashInvalid invalid blurhash
ErrBlurhashInvalid = errors.New("invalid blurhash")
)
|
#ifndef HV_UDP_CLIENT_HPP_
#define HV_UDP_CLIENT_HPP_
#include "hsocket.h"
#include "EventLoopThread.h"
#include "Callback.h"
#include "Channel.h"
namespace hv {
class UdpClient {
public:
UdpClient() {
}
virtual ~UdpClient() {
}
EventLoopPtr loop() {
return loop_thread.loop();
}
//@retval >=0 sockfd, <0 error
int createsocket(int port, const char* host = "127.0.0.1") {
hio_t* io = hloop_create_udp_client(loop_thread.hloop(), host, port);
if (io == NULL) return -1;
channel.reset(new SocketChannel(io));
return channel->fd();
}
void start(bool wait_threads_started = true) {
loop_thread.start(wait_threads_started,
[this]() {
assert(channel != NULL);
channel->onread = [this](Buffer* buf) {
if (onMessage) {
onMessage(channel, buf);
}
};
channel->onwrite = [this](Buffer* buf) {
if (onWriteComplete) {
onWriteComplete(channel, buf);
}
};
channel->startRead();
return 0;
}
);
}
void stop(bool wait_threads_stopped = true) {
loop_thread.stop(wait_threads_stopped);
}
int sendto(Buffer* buf) {
if (channel == NULL) return 0;
return channel->write(buf);
}
int sendto(const std::string& str) {
if (channel == NULL) return 0;
return channel->write(str);
}
public:
SocketChannelPtr channel;
// Callback
MessageCallback onMessage;
WriteCompleteCallback onWriteComplete;
private:
EventLoopThread loop_thread;
};
}
#endif // HV_UDP_CLIENT_HPP_
|
<filename>src/routing/ConstrainedDelaunayTriangulation/Cdt.ts
/*
Following "Sweep-line algorithm for constrained Delaunay triangulation", by <NAME> Zalik
*/
//triangulates the space between point, line segment and polygons of the Delaunay fashion
import {from, IEnumerable} from 'linq-to-typescript'
import {GeomConstants} from '../../math/geometry/geomConstants'
import {Point} from '../../math/geometry/point'
import {Polyline} from '../../math/geometry/polyline'
import {Rectangle} from '../../math/geometry/rectangle'
// import {Assert} from '../../utils/assert'
import {PointMap} from '../../utils/PointMap'
import {Algorithm} from './../../utils/algorithm'
import {CdtEdge} from './CdtEdge'
import {CdtSite} from './CdtSite'
import {CdtTriangle} from './CdtTriangle'
import {SymmetricTuple} from './../../structs/SymmetricTuple'
import {CdtSweeper} from './CdtSweeper'
import {
RectangleNode,
CreateRectangleNodeOnEnumeration,
mkRectangleNode,
} from '../../math/geometry/RTree/RectangleNode'
type SymmetricSegment = SymmetricTuple<Point>
export class Cdt extends Algorithm {
isolatedSitesWithObject: Array<[Point, unknown]>
isolatedSites: Point[] = []
obstacles: Polyline[] = []
isolatedSegments: Array<SymmetricSegment>
P1: CdtSite
P2: CdtSite
sweeper: CdtSweeper
PointsToSites: PointMap<CdtSite> = new PointMap<CdtSite>()
allInputSites: Array<CdtSite>
// constructor
constructor(
isolatedSites: Point[],
obstacles: IEnumerable<Polyline>,
isolatedSegments: IEnumerable<SymmetricSegment>,
) {
super(null)
this.isolatedSites = isolatedSites
if (obstacles) this.obstacles = obstacles.toArray()
if (isolatedSegments) this.isolatedSegments = isolatedSegments.toArray()
}
// constructor
static constructor_(isolatedSitesWithObj: Array<[Point, unknown]>) {
const r = new Cdt(null, null, null)
r.isolatedSitesWithObject = isolatedSitesWithObj
return r
}
FillAllInputSites() {
// for now suppose that the data is correct: no isolatedSites coincide with obstacles or isolatedSegments, obstacles are mutually disjoint, etc
if (this.isolatedSitesWithObject != null) {
for (const tuple of this.isolatedSitesWithObject) {
this.AddSite(tuple[0], tuple[1])
}
}
if (this.isolatedSites != null) {
for (const isolatedSite of this.isolatedSites) {
this.AddSite(isolatedSite, null)
}
}
if (this.obstacles != null) {
for (const poly of this.obstacles) {
this.AddPolylineToAllInputSites(poly)
}
}
if (this.isolatedSegments != null) {
for (const isolatedSegment of this.isolatedSegments) {
this.AddConstrainedEdge(isolatedSegment.A, isolatedSegment.B, null)
}
}
this.AddP1AndP2()
this.allInputSites = Array.from(this.PointsToSites.values())
}
AddSite(point: Point, relatedObject: unknown): CdtSite {
let site: CdtSite
if ((site = this.PointsToSites.get(point))) {
site.Owner = relatedObject
// set the owner anyway
} else {
site = CdtSite.mkSO(point, relatedObject)
this.PointsToSites.set(point, site)
}
return site
}
AddP1AndP2() {
const box = Rectangle.mkEmpty()
for (const site of this.PointsToSites.keys()) {
box.add(site)
}
const delx = Math.max(box.width / 3, 1)
const dely = Math.max(box.height / 3, 1)
this.P1 = new CdtSite(box.leftBottom.add(new Point(-delx, -dely)))
this.P2 = new CdtSite(box.rightBottom.add(new Point(delx, -dely)))
}
AddPolylineToAllInputSites(poly: Polyline) {
for (let pp = poly.startPoint; pp.next != null; pp = pp.next) {
this.AddConstrainedEdge(pp.point, pp.next.point, poly)
}
if (poly.closed) {
this.AddConstrainedEdge(poly.endPoint.point, poly.startPoint.point, poly)
}
}
AddConstrainedEdge(a: Point, b: Point, poly: Polyline) {
const ab = Cdt.AbovePP(a, b)
/*Assert.assert(ab != 0)*/
let upperPoint: CdtSite
let lowerPoint: CdtSite
if (ab > 0) {
// a is above b
upperPoint = this.AddSite(a, poly)
lowerPoint = this.AddSite(b, poly)
} else {
/*Assert.assert(ab < 0)*/
upperPoint = this.AddSite(b, poly)
lowerPoint = this.AddSite(a, poly)
}
const edge = Cdt.CreateEdgeOnOrderedCouple(upperPoint, lowerPoint)
edge.Constrained = true
/*Assert.assert(this.EdgeIsCorrect(edge))*/
}
static GetOrCreateEdge(a: CdtSite, b: CdtSite): CdtEdge {
if (Cdt.AboveCC(a, b) == 1) {
const e = a.EdgeBetweenUpperSiteAndLowerSite(b)
if (e != null) {
return e
}
return Cdt.CreateEdgeOnOrderedCouple(a, b)
} else {
const e = b.EdgeBetweenUpperSiteAndLowerSite(a)
if (e != null) {
return e
}
return Cdt.CreateEdgeOnOrderedCouple(b, a)
}
}
static CreateEdgeOnOrderedCouple(
upperPoint: CdtSite,
lowerPoint: CdtSite,
): CdtEdge {
/*Assert.assert(Cdt.AboveCC(upperPoint, lowerPoint) == 1)*/
return new CdtEdge(upperPoint, lowerPoint)
}
public GetTriangles(): Set<CdtTriangle> {
return this.sweeper.triangles
}
// Executes the actual algorithm.
run() {
this.Initialization()
this.SweepAndFinalize()
}
SweepAndFinalize() {
this.sweeper = new CdtSweeper(
this.allInputSites,
this.P1,
this.P2,
Cdt.GetOrCreateEdge,
)
this.sweeper.run()
}
Initialization() {
this.FillAllInputSites()
this.allInputSites.sort(Cdt.OnComparison)
}
static OnComparison(a: CdtSite, b: CdtSite): number {
return Cdt.AboveCC(a, b)
}
// compare first y then -x coordinates
public static AbovePP(a: Point, b: Point): number {
let del = a.y - b.y
if (del > 0) {
return 1
}
if (del < 0) {
return -1
}
del = a.x - b.x
// for a horizontal edge return the point with the smaller X
return del > 0 ? -1 : del < 0 ? 1 : 0
}
// compare first y then -x coordinates
static AboveCC(a: CdtSite, b: CdtSite): number {
return Cdt.AbovePP(a.point, b.point)
}
RestoreEdgeCapacities() {
for (const site of this.allInputSites) {
for (const e of site.Edges) {
if (!e.Constrained) {
e.ResidualCapacity = e.Capacity
}
}
}
}
public SetInEdges() {
for (const site of this.PointsToSites.values()) {
for (const e of site.Edges) {
const oSite = e.lowerSite
/*Assert.assert(oSite != site)*/
oSite.AddInEdge(e)
}
}
}
public FindSite(point: Point): CdtSite {
return this.PointsToSites.get(point)
}
static PointIsInsideOfTriangle(point: Point, t: CdtTriangle): boolean {
for (let i = 0; i < 3; i++) {
const a = t.Sites.getItem(i).point
const b = t.Sites.getItem(i + 1).point
if (
Point.signedDoubledTriangleArea(point, a, b) <
GeomConstants.distanceEpsilon * -1
) {
return false
}
}
return true
}
cdtTree: RectangleNode<CdtTriangle, Point> = null
GetCdtTree(): RectangleNode<CdtTriangle, Point> {
if (this.cdtTree == null) {
this.cdtTree = CreateRectangleNodeOnEnumeration(
from(this.GetTriangles().values()).select((t) =>
mkRectangleNode<CdtTriangle, Point>(t, t.BoundingBox()),
),
)
}
return this.cdtTree
}
EdgeIsCorrect(edge: CdtEdge): boolean {
const us = edge.upperSite
let edgeIsThere = false
for (const e of us.Edges) {
if (e == edge) {
edgeIsThere = true
break
}
}
if (!edgeIsThere) {
return false
}
const usShouldBe = this.PointsToSites.get(us.point)
return usShouldBe == us
}
}
|
# Directory containing this script; used to locate sibling tools.
# Quoted so paths containing spaces survive word splitting.
SCRIPT_PATH=$(dirname "${BASH_SOURCE[0]}")
# hl: run the bundled highlite.py syntax highlighter, forwarding all
# arguments unchanged; complains when python3 is not on PATH.
hl() {
  if which python3 &>/dev/null; then
    python3 "$SCRIPT_PATH/highlite.py" "$@"
  else
    echo "Couldn't locate python3"
  fi
}
|
import numpy as np
import torch
class DataPreprocessor:
    """Concatenates selected per-split payoff/probability/action arrays and
    exposes them as torch tensors on the instance."""

    def __init__(self):
        # Tensors produced by preprocess_data(); None until it is called.
        self.input_data = None
        self.input_prob = None
        self.input_action = None

    def preprocess_data(self, payoff_matrix_split, col_prob_split, col_action_split, selected_indices):
        """Concatenate the splits picked by ``selected_indices`` (in order)
        and store the results as torch tensors.

        Each of the three ``*_split`` arguments is an indexable collection of
        numpy-compatible arrays; payoff/prob are cast to float32 and actions
        to uint8.
        """
        input_data = np.concatenate([payoff_matrix_split[x] for x in selected_indices]).astype(np.float32)
        input_prob = np.concatenate([col_prob_split[x] for x in selected_indices]).astype(np.float32)
        input_action = np.concatenate([col_action_split[x] for x in selected_indices]).astype(np.uint8)
        self.input_data = torch.from_numpy(input_data)
        self.input_prob = torch.from_numpy(input_prob)
        # Bug fix: input_action was computed but never stored on the instance.
        self.input_action = torch.from_numpy(input_action)
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
# Maximum number of local SSDs a GCE instance supports.
readonly GCE_MAX_LOCAL_SSD=8
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"
# Pull in the distro-specific node helper, bailing out on unsupported distros.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
  source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
else
  echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
  exit 1
fi
# Same for the master-side helper.
if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
else
  echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
  exit 1
fi
# NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT are mutually exclusive.
if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]] ; then
  echo -e "${color_red}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm}" >&2
  exit 2
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
  DEFAULT_GCI_PROJECT=google-containers
  if [[ "${GCI_VERSION}" == "cos"* ]]; then
    # Newer GCI images are published under the cos-cloud project.
    DEFAULT_GCI_PROJECT=cos-cloud
  fi
  MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
  # If the master image is not set, we use the latest GCI image.
  # Otherwise, we respect whatever is set by the user.
  MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}}
fi
# Sets NODE_IMAGE / NODE_IMAGE_PROJECT based on the node os distro.
# Only the gci distro is handled here; for other distros the variables
# are left as-is.
function set-node-image() {
  if [[ "${NODE_OS_DISTRIBUTION}" != "gci" ]]; then
    return
  fi
  DEFAULT_GCI_PROJECT=google-containers
  if [[ "${GCI_VERSION}" == "cos"* ]]; then
    DEFAULT_GCI_PROJECT=cos-cloud
  fi
  # If the node image is not set, fall back to the GCI version name;
  # otherwise respect whatever is set by the user.
  NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
  NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-${DEFAULT_GCI_PROJECT}}
}
# Resolve the node image now that the distro helpers are loaded.
set-node-image
# Verify cluster autoscaler configuration.
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
  if [[ -z $AUTOSCALER_MIN_NODES ]]; then
    echo "AUTOSCALER_MIN_NODES not set."
    exit 1
  fi
  if [[ -z $AUTOSCALER_MAX_NODES ]]; then
    echo "AUTOSCALER_MAX_NODES not set."
    exit 1
  fi
fi
# Cluster-wide defaults; each may be overridden by the environment.
NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-minion"}
NODE_TAGS="${NODE_TAG}"
ALLOCATE_NODE_CIDRS=true
PREEXISTING_NETWORK=false
PREEXISTING_NETWORK_MODE=""
# Whether to prompt before installing missing gcloud components.
KUBE_PROMPT_FOR_UPDATE=${KUBE_PROMPT_FOR_UPDATE:-"n"}
# How long (in seconds) to wait for cluster initialization.
KUBE_CLUSTER_INITIALIZATION_TIMEOUT=${KUBE_CLUSTER_INITIALIZATION_TIMEOUT:-300}
# join_csv: print all arguments joined by commas.
function join_csv() {
  local IFS=','
  echo "$*"
}
# split_csv: print the first comma-separated field of the arguments.
function split_csv() {
  local joined="$*"
  echo "${joined%%,*}"
}
# Verify prereqs: openssl (for cert generation) and the Google Cloud SDK
# tools (gcloud, gsutil). May interactively install the SDK when
# KUBE_PROMPT_FOR_UPDATE=y; exits non-zero when a prerequisite is missing.
function verify-prereqs() {
  local cmd
  # we use openssl to generate certs
  kube::util::test_openssl_installed
  # ensure a version supported by easyrsa is installed
  if [ "$(openssl version | cut -d\ -f1)" == "LibreSSL" ]; then
    echo "LibreSSL is not supported. Please ensure openssl points to an OpenSSL binary"
    if [ "$(uname -s)" == "Darwin" ]; then
      echo 'On macOS we recommend using homebrew and adding "$(brew --prefix openssl)/bin" to your PATH'
    fi
    exit 1
  fi
  # we use gcloud to create the cluster, gsutil to stage binaries and data
  for cmd in gcloud gsutil; do
    if ! which "${cmd}" >/dev/null; then
      local resp="n"
      # Optionally offer to install the Google Cloud SDK.
      if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
        echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
        read resp
      fi
      if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
        curl https://sdk.cloud.google.com | bash
      fi
      # Re-check after the (possible) install.
      if ! which "${cmd}" >/dev/null; then
        echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud " >&2
        echo "SDK can be downloaded from https://cloud.google.com/sdk/." >&2
        exit 1
      fi
    fi
  done
  update-or-verify-gcloud
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
#   PROJECT
#   NETWORK_PROJECT
#   PROJECT_REPORTED
function detect-project() {
  if [[ -z "${PROJECT-}" ]]; then
    # Ask gcloud for its configured default project.
    PROJECT=$(gcloud config list project --format 'value(core.project)')
  fi
  NETWORK_PROJECT=${NETWORK_PROJECT:-${PROJECT}}
  if [[ -z "${PROJECT-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  # Report the resolved project/network/zone once per invocation.
  if [[ -z "${PROJECT_REPORTED-}" ]]; then
    echo "Project: ${PROJECT}" >&2
    echo "Network Project: ${NETWORK_PROJECT}" >&2
    echo "Zone: ${ZONE}" >&2
    PROJECT_REPORTED=true
  fi
}
# Use gsutil to get the md5 hash for a particular tar.
#
# $1: tar location — local or in the cloud, e.g.
#   local: ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz
#   cloud: gs://kubernetes-staging-PROJECT/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz
# Echoes the md5 hex digest (empty when the object is missing).
function gsutil_get_tar_md5() {
  local -r tar_location=$1
  # Parse the "Hash (md5):" line of gsutil's output; sed strips the leading
  # whitespace. ${tar_location} is quoted so paths with spaces don't split.
  local -r tar_md5=$(gsutil hash -h -m "${tar_location}" 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
  echo "${tar_md5}"
}
# Copy a release tar and its accompanying hash to the staging bucket,
# skipping the upload when the remote copy already has the same md5.
#
# $1: staging path, e.g. gs://kubernetes-staging-PROJECT/kubernetes-devel
# $2: gs:// URL the tar will have after upload
# $3: local tar path, e.g. ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz
# $4: sha1 hash of the tar
function copy-to-staging() {
  local -r staging_path=$1
  local -r gs_url=$2
  local -r tar=$3
  local -r hash=$4
  # Quote ${tar} so paths containing spaces survive word splitting.
  local -r basename_tar=$(basename "${tar}")
  #check whether this tar alread exists and has the same hash
  #if it matches, then don't bother uploading it again
  #remote_tar_md5 checks the remote location for the existing tarball and its md5
  local -r remote_tar_md5=$(gsutil_get_tar_md5 "${staging_path}/${basename_tar}")
  if [[ -n ${remote_tar_md5} ]]; then
    #local_tar_md5 is the md5 of the local tarball
    local -r local_tar_md5=$(gsutil_get_tar_md5 "${tar}")
    if [[ "${remote_tar_md5}" == "${local_tar_md5}" ]]; then
      echo "+++ ${basename_tar} uploaded earlier, cloud and local file md5 match (md5 = ${local_tar_md5})"
      return 0
    fi
  fi
  echo "${hash}" > "${tar}.sha1"
  gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}"
  # Best-effort: make the uploaded objects publicly readable.
  gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1
  echo "+++ ${basename_tar} uploaded (sha1 = ${hash})"
}
# Given the cluster zone, set the list of regional GCS release bucket
# suffixes in preference order. GCS doesn't give us an API for this, so
# it is hardcoded.
#
# Assumed vars:
#   RELEASE_REGION_FALLBACK
#   REGIONAL_KUBE_ADDONS
#   ZONE
# Vars set:
#   PREFERRED_REGION
function set-preferred-region() {
  case ${ZONE} in
    asia-*)
      PREFERRED_REGION=("asia" "us" "eu") ;;
    europe-*)
      PREFERRED_REGION=("eu" "us" "asia") ;;
    *)
      PREFERRED_REGION=("us" "eu" "asia") ;;
  esac
  # Without regional fallback only the primary region is kept.
  if [[ "${RELEASE_REGION_FALLBACK}" != "true" ]]; then
    PREFERRED_REGION=( "${PREFERRED_REGION[0]}" )
  fi
}
# Take the local tar files and upload them to Google Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   PROJECT
#   SERVER_BINARY_TAR
#   KUBE_MANIFESTS_TAR
#   ZONE
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
#   KUBE_MANIFESTS_TAR_URL
#   KUBE_MANIFESTS_TAR_HASH
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SERVER_BINARY_TAR_HASH=
  KUBE_MANIFESTS_TAR_URL=
  KUBE_MANIFESTS_TAR_HASH=
  # Hash the project name so staging bucket names are unique per project.
  local project_hash
  if which md5 > /dev/null 2>&1; then
    # macOS md5
    project_hash=$(md5 -q -s "$PROJECT")
  else
    # Linux md5sum
    project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
  fi
  # This requires 1 million projects before the probability of collision is 50%
  # that's probably good enough for now :P
  project_hash=${project_hash:0:10}
  set-preferred-region
  if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then
    DOCKER_REGISTRY_MIRROR_URL="https://mirror.gcr.io"
  fi
  SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
  if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
    KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}")
  fi
  local server_binary_tar_urls=()
  local kube_manifest_tar_urls=()
  # Stage the tars into one bucket per preferred region.
  for region in "${PREFERRED_REGION[@]}"; do
    suffix="-${region}"
    # The "us" bucket is the historical default and carries no suffix.
    if [[ "${suffix}" == "-us" ]]; then
      suffix=""
    fi
    local staging_bucket="gs://kubernetes-staging-${project_hash}${suffix}"
    # Ensure the buckets are created
    if ! gsutil ls "${staging_bucket}" >/dev/null; then
      echo "Creating ${staging_bucket}"
      gsutil mb -l "${region}" "${staging_bucket}"
    fi
    local staging_path="${staging_bucket}/${INSTANCE_PREFIX}-devel"
    echo "+++ Staging server tars to Google Storage: ${staging_path}"
    local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
    copy-to-staging "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}"
    # Convert from gs:// URL to an https:// URL
    server_binary_tar_urls+=("${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}")
    if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
      local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}"
      copy-to-staging "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}"
      # Convert from gs:// URL to an https:// URL
      kube_manifests_tar_urls+=("${kube_manifests_gs_url/gs:\/\//https://storage.googleapis.com/}")
    fi
  done
  # Publish the URL lists as comma-separated strings.
  SERVER_BINARY_TAR_URL=$(join_csv "${server_binary_tar_urls[@]}")
  if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
    KUBE_MANIFESTS_TAR_URL=$(join_csv "${kube_manifests_tar_urls[@]}")
  fi
}
# Detect minions created in the minion group
#
# Assumed vars:
#   NODE_INSTANCE_PREFIX
# Vars set:
#   NODE_NAMES
#   INSTANCE_GROUPS
function detect-node-names() {
  detect-project
  INSTANCE_GROUPS=()
  # Managed instance groups whose names start with the node prefix, in our zone.
  INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
    --project "${PROJECT}" \
    --filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
    --format='value(name)' || true))
  NODE_NAMES=()
  if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
    # Collect every instance in every matching group.
    for group in "${INSTANCE_GROUPS[@]}"; do
      NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
        "${group}" --zone "${ZONE}" --project "${PROJECT}" \
        --format='value(instance)'))
    done
  fi
  # Add heapster node name to the list too (if it exists).
  if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
    NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster")
  fi
  echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2
  echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
}
# Detect the information about the minions
#
# Assumed vars:
#   ZONE
# Vars set:
#   NODE_NAMES
#   KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes() {
  detect-project
  detect-node-names
  KUBE_NODE_IP_ADDRESSES=()
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    # External NAT IP of the node's first network interface.
    local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
      "${NODE_NAMES[$i]}" --format='value(networkInterfaces[0].accessConfigs[0].natIP)')
    if [[ -z "${node_ip-}" ]] ; then
      echo "Did not find ${NODE_NAMES[$i]}" >&2
    else
      echo "Found ${NODE_NAMES[$i]} at ${node_ip}"
      KUBE_NODE_IP_ADDRESSES+=("${node_ip}")
    fi
  done
  if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}
# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
#   ZONE
#   REGION
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master() {
  detect-project
  KUBE_MASTER=${MASTER_NAME}
  echo "Trying to find master named '${MASTER_NAME}'" >&2
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    # The master's static IP is registered under the "<master>-ip" address name.
    local master_address_name="${MASTER_NAME}-ip"
    echo "Looking for address '${master_address_name}'" >&2
    if ! KUBE_MASTER_IP=$(gcloud compute addresses describe "${master_address_name}" \
      --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') || \
      [[ -z "${KUBE_MASTER_IP-}" ]]; then
      echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
      exit 1
    fi
  fi
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" >&2
}
# Ensure KUBE_BEARER_TOKEN is populated: pull it from the selected
# kubeconfig context when one is set, otherwise generate a fresh token.
function load-or-gen-kube-bearertoken() {
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    get-kubeconfig-bearertoken
  fi
  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
    gen-kube-bearertoken
  fi
}
# Figure out which binary use on the server and assure it is available.
# If KUBE_VERSION is specified use binaries specified by it, otherwise
# use local dev binaries.
#
# Assumed vars:
#   KUBE_VERSION
#   KUBE_RELEASE_VERSION_REGEX
#   KUBE_CI_VERSION_REGEX
# Vars set:
#   KUBE_TAR_HASH
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
function tars_from_version() {
  # Pick whichever sha1 tool this platform provides.
  local sha1sum=""
  if which sha1sum >/dev/null 2>&1; then
    sha1sum="sha1sum"
  else
    sha1sum="shasum -a1"
  fi
  if [[ -z "${KUBE_VERSION-}" ]]; then
    # No version requested: stage locally-built tars.
    find-release-tars
    upload-server-tars
  elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
  elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
  else
    echo "Version doesn't match regexp" >&2
    exit 1
  fi
  # Fetch the published sha1 for the server tar; non-fatal when missing.
  if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
    echo "Failure trying to curl release .sha1"
  fi
  # But the tar itself must exist.
  if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
    echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
    exit 1
  fi
}
# Reads kube-env metadata from master
#
# Prints the kube-env and kube-master-certs metadata attributes (fetched
# over ssh from the instance's metadata server) to stdout.
#
# Assumed vars:
#   KUBE_MASTER
#   PROJECT
#   ZONE
function get-master-env() {
  # TODO(zmerlynn): Make this more reliable with retries.
  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
    "curl --fail --silent -H 'Metadata-Flavor: Google' \
      'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
    "curl --fail --silent -H 'Metadata-Flavor: Google' \
      'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
}
# Quote something appropriate for a yaml string.
#
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
# "strip out quotes", and we really should be using a YAML library for
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
function yaml-quote {
  local escaped
  escaped=$(echo "${@:-}" | sed -e "s/'/''/g")
  echo "'${escaped}'"
}
# Writes the cluster location (the zone) into a temporary file
# (${KUBE_TEMP}/cluster-location.txt) for later upload to the instances.
# Assumed vars
#   ZONE
#   KUBE_TEMP
function write-cluster-location {
  cat >"${KUBE_TEMP}/cluster-location.txt" << EOF
${ZONE}
EOF
}
# Writes the cluster name into a temporary file
# (${KUBE_TEMP}/cluster-name.txt) for later upload to the instances.
# Assumed vars
#   CLUSTER_NAME
#   KUBE_TEMP
function write-cluster-name {
  cat >"${KUBE_TEMP}/cluster-name.txt" << EOF
${CLUSTER_NAME}
EOF
}
# Builds the master's env, kubelet-config and master-certs files under
# KUBE_TEMP, resolving KUBELET_APISERVER / KUBERNETES_MASTER_NAME first.
function write-master-env {
  # If the user requested that the master be part of the cluster, set the
  # environment variable to program the master kubelet to register itself.
  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" && -z "${KUBELET_APISERVER:-}" ]]; then
    KUBELET_APISERVER="${MASTER_NAME}"
  fi
  if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
    KUBERNETES_MASTER_NAME="${MASTER_NAME}"
  fi
  construct-kubelet-flags true
  build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
  build-kubelet-config true "${KUBE_TEMP}/master-kubelet-config.yaml"
  build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
}
# Builds the node's env and kubelet-config files under KUBE_TEMP.
function write-node-env {
  if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
    KUBERNETES_MASTER_NAME="${MASTER_NAME}"
  fi
  construct-kubelet-flags false
  build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
  build-kubelet-config false "${KUBE_TEMP}/node-kubelet-config.yaml"
}
# Echoes the comma-separated node-label list for a master ($1 == "true")
# or a node, combining the kube-proxy daemonset marker, NODE_LABELS and
# (for non-masters) NON_MASTER_NODE_LABELS.
function build-node-labels {
  local master=$1
  local node_labels=""
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
    # Add kube-proxy daemonset label to node to avoid situation during cluster
    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
    node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
  fi
  if [[ -n "${NODE_LABELS:-}" ]]; then
    node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
  fi
  if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
    node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
  fi
  # Quoted to prevent word splitting / pathname expansion of the label string.
  echo "${node_labels}"
}
# yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result
# under the provided name. If the encoded structure is empty, echoes nothing.
# 1: name to be output in yaml
# 2: encoded map-string-string (which may contain duplicate keys - resulting in map-string-stringarray)
# 3: key-value separator (defaults to ':')
# 4: item separator (defaults to ',')
function yaml-map-string-stringarray {
  declare -r name="${1}"
  declare -r encoded="${2}"
  declare -r kv_sep="${3:-:}"
  declare -r item_sep="${4:-,}"
  declare -a pairs # indexed array
  declare -A map # associative array
  IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep
  for pair in "${pairs[@]}"; do
    declare key
    declare value
    IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
    map[$key]="${map[$key]+${map[$key]}${item_sep}}${value}" # append values from duplicate keys
  done
  # only output if there is a non-empty map
  if [[ ${#map[@]} -gt 0 ]]; then
    echo "${name}:"
    for k in "${!map[@]}"; do
      echo "  ${k}:"
      declare -a values
      # Re-split the accumulated values for this key back into an array.
      IFS="${item_sep}" read -ra values <<<"${map[$k]}"
      for val in "${values[@]}"; do
        # declare across two lines so errexit can catch failures
        declare v
        v=$(yaml-quote "${val}")
        echo "    - ${v}"
      done
    done
  fi
}
# yaml-map-string-string converts the encoded structure to yaml format, and echoes the result
# under the provided name. If the encoded structure is empty, echoes nothing.
# 1: name to be output in yaml
# 2: encoded map-string-string (no duplicate keys)
# 3: bool, whether to yaml-quote the value string in the output (defaults to true)
# 4: key-value separator (defaults to ':')
# 5: item separator (defaults to ',')
function yaml-map-string-string {
  declare -r name="${1}"
  declare -r encoded="${2}"
  declare -r quote_val_string="${3:-true}"
  declare -r kv_sep="${4:-:}"
  declare -r item_sep="${5:-,}"
  declare -a pairs # indexed array
  declare -A map # associative array
  IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep # TODO(mtaufen): try quoting this too
  for pair in "${pairs[@]}"; do
    declare key
    declare value
    IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
    map[$key]="${value}" # add to associative array
  done
  # only output if there is a non-empty map
  if [[ ${#map[@]} -gt 0 ]]; then
    echo "${name}:"
    for k in "${!map[@]}"; do
      if [[ "${quote_val_string}" == "true" ]]; then
        # declare across two lines so errexit can catch failures
        declare v
        v=$(yaml-quote "${map[$k]}")
        echo "  ${k}: ${v}"
      else
        echo "  ${k}: ${map[$k]}"
      fi
    done
  fi
}
# $1: if 'true', we're rendering flags for a master, else a node
# Vars set:
#   KUBELET_ARGS — the full kubelet command-line flag string
function construct-kubelet-flags {
  local master=$1
  local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
  flags+=" --allow-privileged=true"
  flags+=" --cloud-provider=gce"
  # Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
  flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
  flags+=" --experimental-check-node-capabilities-before-mount=true"
  # Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved)
  flags+=" --cert-dir=/var/lib/kubelet/pki/"
  # Configure the directory that the Kubelet should use to store dynamic config checkpoints
  flags+=" --dynamic-config-dir=/var/lib/kubelet/dynamic-config"
  if [[ "${master}" == "true" ]]; then
    flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
      #TODO(mikedanese): allow static pods to start before creating a client
      #flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
      #flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
      flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
      flags+=" --register-schedulable=false"
    fi
  else # For nodes
    flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
    flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
    flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
  fi
  # Network plugin
  if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
    flags+=" --cni-bin-dir=/home/kubernetes/bin"
    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
      # Calico uses CNI always.
      # Note that network policy won't work for master node.
      if [[ "${master}" == "true" ]]; then
        flags+=" --network-plugin=${NETWORK_PROVIDER}"
      else
        flags+=" --network-plugin=cni"
      fi
    else
      # Otherwise use the configured value.
      flags+=" --network-plugin=${NETWORK_PROVIDER}"
    fi
  fi
  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
    flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
  fi
  flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
  local node_labels=$(build-node-labels ${master})
  if [[ -n "${node_labels:-}" ]]; then
    flags+=" --node-labels=${node_labels}"
  fi
  if [[ -n "${NODE_TAINTS:-}" ]]; then
    flags+=" --register-with-taints=${NODE_TAINTS}"
  fi
  # TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it?
  if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
    flags+=" --rotate-certificates=true"
  fi
  if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
    flags+=" --container-runtime=${CONTAINER_RUNTIME}"
  fi
  if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
    flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
  fi
  if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
    flags+=" --max-pods=${MAX_PODS_PER_NODE}"
  fi
  # Result is returned via the KUBELET_ARGS global.
  KUBELET_ARGS="${flags}"
}
# $1: if 'true', we're rendering config for a master, else a node
# $2: output file path — receives a KubeletConfiguration yaml document
function build-kubelet-config {
  local master=$1
  local file=$2
  rm -f "${file}"
  {
    declare quoted_dns_server_ip
    declare quoted_dns_domain
    quoted_dns_server_ip=$(yaml-quote "${DNS_SERVER_IP}")
    # With node-local DNS enabled the kubelet points at the local cache IP.
    if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
      quoted_dns_server_ip=$(yaml-quote "${LOCAL_DNS_IP}")
    fi
    quoted_dns_domain=$(yaml-quote "${DNS_DOMAIN}")
    cat <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupRoot: /
clusterDNS:
  - ${quoted_dns_server_ip}
clusterDomain: ${quoted_dns_domain}
staticPodPath: /etc/kubernetes/manifests
readOnlyPort: 10255
EOF
    # --- begin master-specific config ---
    if [[ "${master}" == "true" ]]; then
      cat <<EOF
enableDebuggingHandlers: false
hairpinMode: none
authentication:
  webhook:
    enabled: false
  anonymous:
    enabled: true
authorization:
  mode: AlwaysAllow
EOF
      if [[ "${REGISTER_MASTER_KUBELET:-false}" == "false" ]]; then
        # Note: Standalone mode is used by GKE
        declare quoted_master_ip_range
        quoted_master_ip_range=$(yaml-quote "${MASTER_IP_RANGE}")
        cat <<EOF
podCidr: ${quoted_master_ip_range}
EOF
      fi
    # --- end master-specific config ---
    else
    # --- begin node-specific config ---
      # Keep authentication.x509.clientCAFile in sync with CA_CERT_BUNDLE_PATH in configure-helper.sh
      cat <<EOF
enableDebuggingHandlers: true
authentication:
  x509:
    clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt
EOF
      if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
         [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
         [[ "${HAIRPIN_MODE:-}" == "none" ]]; then
        declare quoted_hairpin_mode
        quoted_hairpin_mode=$(yaml-quote "${HAIRPIN_MODE}")
        cat <<EOF
hairpinMode: ${quoted_hairpin_mode}
EOF
      fi
    # --- end node-specific config ---
    fi
    # Note: ENABLE_MANIFEST_URL is used by GKE
    if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
      declare quoted_manifest_url
      quoted_manifest_url=$(yaml-quote "${MANIFEST_URL}")
      cat <<EOF
staticPodURL: ${quoted_manifest_url}
EOF
      yaml-map-string-stringarray 'staticPodURLHeader' "${MANIFEST_URL_HEADER}"
    fi
    if [[ -n "${EVICTION_HARD:-}" ]]; then
      yaml-map-string-string 'evictionHard' "${EVICTION_HARD}" true '<'
    fi
    if [[ -n "${FEATURE_GATES:-}" ]]; then
      yaml-map-string-string 'featureGates' "${FEATURE_GATES}" false '='
    fi
  } > "${file}"
}
# Writes the master certificate/key material (base64-encoded env values)
# as a yaml file to the path given in $1.
function build-kube-master-certs {
  local file=$1
  rm -f ${file}
  cat >$file <<EOF
KUBEAPISERVER_CERT: $(yaml-quote ${KUBEAPISERVER_CERT_BASE64:-})
KUBEAPISERVER_KEY: $(yaml-quote ${KUBEAPISERVER_KEY_BASE64:-})
CA_KEY: $(yaml-quote ${CA_KEY_BASE64:-})
AGGREGATOR_CA_KEY: $(yaml-quote ${AGGREGATOR_CA_KEY_BASE64:-})
REQUESTHEADER_CA_CERT: $(yaml-quote ${REQUESTHEADER_CA_CERT_BASE64:-})
PROXY_CLIENT_CERT: $(yaml-quote ${PROXY_CLIENT_CERT_BASE64:-})
PROXY_CLIENT_KEY: $(yaml-quote ${PROXY_CLIENT_KEY_BASE64:-})
EOF
}
# $1: if 'true', we're building a master yaml, else a node
function build-kube-env {
# Compose the kube-env yaml consumed by the instance startup scripts.
# $1 == "true" builds a master env file, anything else a node env file.
# $2 is the output file path (overwritten).
local master=$1
local file=$2
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
# For ubuntu (and custom node images) the tar URLs are run through
# split_csv — presumably to normalize a comma-separated URL list; confirm
# against split_csv's definition before changing.
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom") ]]; then
# TODO: Support fallback .tar.gz settings on Container Linux
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
fi
# Start the file fresh, then write the env vars common to masters and nodes.
rm -f ${file}
cat >$file <<EOF
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
NODE_TAGS: $(yaml-quote ${NODE_TAGS:-})
NODE_NETWORK: $(yaml-quote ${NETWORK:-})
NODE_SUBNETWORK: $(yaml-quote ${SUBNETWORK:-})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
PROJECT_ID: $(yaml-quote ${PROJECT})
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_PROMETHEUS_MONITORING: $(yaml-quote ${ENABLE_PROMETHEUS_MONITORING:-false})
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
METADATA_AGENT_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CPU_REQUEST:-})
METADATA_AGENT_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_MEMORY_REQUEST:-})
METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-})
METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-})
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none})
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-true})
ENABLE_NODELOCAL_DNS: $(yaml-quote ${ENABLE_NODELOCAL_DNS:-false})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
LOCAL_DNS_IP: $(yaml-quote ${LOCAL_DNS_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
KUBE_PROXY_MODE: $(yaml-quote ${KUBE_PROXY_MODE:-iptables})
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-true})
ADVANCED_AUDIT_LOG_MODE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MODE:-})
ADVANCED_AUDIT_LOG_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT:-})
ADVANCED_AUDIT_LOG_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_QPS:-})
ADVANCED_AUDIT_LOG_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_BURST:-})
ADVANCED_AUDIT_LOG_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF:-})
ADVANCED_AUDIT_WEBHOOK_MODE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MODE:-})
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-})
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-})
ENABLE_NODE_JOURNAL: $(yaml-quote ${ENABLE_NODE_JOURNAL:-false})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
DISABLE_PROMETHEUS_TO_SD_IN_DS: $(yaml-quote ${DISABLE_PROMETHEUS_TO_SD_IN_DS:-false})
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
ZONE: $(yaml-quote ${ZONE})
REGION: $(yaml-quote ${REGION})
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
ENABLE_NODE_TERMINATION_HANDLER: $(yaml-quote ${ENABLE_NODE_TERMINATION_HANDLER:-false})
CUSTOM_NETD_YAML: |
$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_CALICO_NODE_DAEMONSET_YAML: |
$(echo "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_TYPHA_DEPLOYMENT_YAML: |
$(echo "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}" | sed -e "s/'/''/g")
EOF
# GCI/COS (under either name) additionally control remounting of the
# volume plugin directory.
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \
[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "cos" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
cat >>$file <<EOF
REMOUNT_VOLUME_PLUGIN_DIR: $(yaml-quote ${REMOUNT_VOLUME_PLUGIN_DIR:-true})
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat >>$file <<EOF
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
EOF
fi
# These distributions consume the kube-manifests tarball, so pass its
# URL and hash along.
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu") ]] || \
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" = "ubuntu" || "${NODE_OS_DISTRIBUTION}" = "custom") ]] ; then
cat >>$file <<EOF
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
EOF
fi
if [ -n "${TEST_CLUSTER:-}" ]; then
cat >>$file <<EOF
TEST_CLUSTER: $(yaml-quote ${TEST_CLUSTER})
EOF
fi
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
DOCKER_TEST_LOG_LEVEL: $(yaml-quote ${DOCKER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${DOCKER_LOG_DRIVER:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_DRIVER: $(yaml-quote ${DOCKER_LOG_DRIVER})
EOF
fi
if [ -n "${DOCKER_LOG_MAX_SIZE:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_MAX_SIZE: $(yaml-quote ${DOCKER_LOG_MAX_SIZE})
EOF
fi
if [ -n "${DOCKER_LOG_MAX_FILE:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_MAX_FILE: $(yaml-quote ${DOCKER_LOG_MAX_FILE})
EOF
fi
if [ -n "${FEATURE_GATES:-}" ]; then
cat >>$file <<EOF
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
EOF
fi
# PROVIDER_VARS is a whitespace-separated list of variable NAMES; each is
# resolved via eval and copied into the env file verbatim.
if [ -n "${PROVIDER_VARS:-}" ]; then
local var_name
local var_value
for var_name in ${PROVIDER_VARS}; do
eval "local var_value=\$(yaml-quote \${${var_name}})"
cat >>$file <<EOF
${var_name}: ${var_value}
EOF
done
fi
if [[ "${master}" == "true" ]]; then
# Master-only env vars.
cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "true")
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
NUM_NODES: $(yaml-quote ${NUM_NODES})
STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-etcd3})
STORAGE_MEDIA_TYPE: $(yaml-quote ${STORAGE_MEDIA_TYPE:-})
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-})
ENABLE_LEGACY_ABAC: $(yaml-quote ${ENABLE_LEGACY_ABAC:-})
MASTER_ADVERTISE_ADDRESS: $(yaml-quote ${MASTER_ADVERTISE_ADDRESS:-})
ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
ENCRYPTION_PROVIDER_CONFIG: $(yaml-quote ${ENCRYPTION_PROVIDER_CONFIG:-})
SERVICEACCOUNT_ISSUER: $(yaml-quote ${SERVICEACCOUNT_ISSUER:-})
EOF
# KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
# flag
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
EOF
fi
# ETCD_IMAGE (if set) allows to use a custom etcd image.
if [ -n "${ETCD_IMAGE:-}" ]; then
cat >>$file <<EOF
ETCD_IMAGE: $(yaml-quote ${ETCD_IMAGE})
EOF
fi
# ETCD_DOCKER_REPOSITORY (if set) allows to use a custom etcd docker repository to pull the etcd image from.
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
cat >>$file <<EOF
ETCD_DOCKER_REPOSITORY: $(yaml-quote ${ETCD_DOCKER_REPOSITORY})
EOF
fi
# ETCD_VERSION (if set) allows you to use custom version of etcd.
# The main purpose of using it may be rollback of etcd v3 API,
# where we need 3.0.* image, but are rolling back to 2.3.7.
if [ -n "${ETCD_VERSION:-}" ]; then
cat >>$file <<EOF
ETCD_VERSION: $(yaml-quote ${ETCD_VERSION})
EOF
fi
if [ -n "${ETCD_HOSTNAME:-}" ]; then
cat >>$file <<EOF
ETCD_HOSTNAME: $(yaml-quote ${ETCD_HOSTNAME})
EOF
fi
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat >>$file <<EOF
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
fi
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
fi
if [ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]; then
cat >>$file <<EOF
ETCD_COMPACTION_INTERVAL_SEC: $(yaml-quote ${ETCD_COMPACTION_INTERVAL_SEC})
EOF
fi
if [ -n "${ETCD_QUOTA_BACKEND_BYTES:-}" ]; then
cat >>$file <<EOF
ETCD_QUOTA_BACKEND_BYTES: $(yaml-quote ${ETCD_QUOTA_BACKEND_BYTES})
EOF
fi
if [ -n "${ETCD_EXTRA_ARGS:-}" ]; then
cat >>$file <<EOF
ETCD_EXTRA_ARGS: $(yaml-quote ${ETCD_EXTRA_ARGS})
EOF
fi
if [ -n "${ETCD_SERVERS:-}" ]; then
cat >>$file <<EOF
ETCD_SERVERS: $(yaml-quote ${ETCD_SERVERS})
EOF
fi
if [ -n "${ETCD_SERVERS_OVERRIDES:-}" ]; then
cat >>$file <<EOF
ETCD_SERVERS_OVERRIDES: $(yaml-quote ${ETCD_SERVERS_OVERRIDES})
EOF
fi
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_ARGS: $(yaml-quote ${CONTROLLER_MANAGER_TEST_ARGS})
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_LOG_LEVEL: $(yaml-quote ${CONTROLLER_MANAGER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
SCHEDULER_TEST_ARGS: $(yaml-quote ${SCHEDULER_TEST_ARGS})
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
SCHEDULER_TEST_LOG_LEVEL: $(yaml-quote ${SCHEDULER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
EOF
fi
if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
EOF
fi
if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
cat >>$file <<EOF
CLUSTER_SIGNING_DURATION: $(yaml-quote ${CLUSTER_SIGNING_DURATION})
EOF
fi
# Any nvidia accelerator on the nodes implies the GPU device plugin.
if [[ "${NODE_ACCELERATORS:-}" == *"type=nvidia"* ]]; then
cat >>$file <<EOF
ENABLE_NVIDIA_GPU_DEVICE_PLUGIN: $(yaml-quote "true")
EOF
fi
if [ -n "${ADDON_MANAGER_LEADER_ELECTION:-}" ]; then
cat >>$file <<EOF
ADDON_MANAGER_LEADER_ELECTION: $(yaml-quote ${ADDON_MANAGER_LEADER_ELECTION})
EOF
fi
else
# Node-only env vars.
cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "false")
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
EOF
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
KUBEPROXY_TEST_ARGS: $(yaml-quote ${KUBEPROXY_TEST_ARGS})
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
KUBEPROXY_TEST_LOG_LEVEL: $(yaml-quote ${KUBEPROXY_TEST_LOG_LEVEL})
EOF
fi
fi
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
cat >>$file <<EOF
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
EOF
if [[ "${master}" == "false" ]]; then
# TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
# which reads the kube-env to determine the shape of a node and was broken by #60020.
# This should be removed as soon as a more reliable source of information is available!
local node_labels=$(build-node-labels false)
local node_taints="${NODE_TAINTS:-}"
local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
cat >>$file <<EOF
AUTOSCALER_ENV_VARS: $(yaml-quote ${autoscaler_env_vars})
EOF
fi
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat >>$file <<EOF
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
EOF
fi
if [ -n "${MAX_PODS_PER_NODE:-}" ]; then
cat >>$file <<EOF
MAX_PODS_PER_NODE: $(yaml-quote ${MAX_PODS_PER_NODE})
EOF
fi
}
# Print the SHA1 hex digest of the file given as $1.
# Uses sha1sum (Linux/coreutils) when available, falling back to
# shasum -a1 (the tool shipped on macOS).
function sha1sum-file() {
  # 'command -v' is POSIX-specified for command lookup and avoids the
  # portability/alias pitfalls of the external 'which' utility.
  if command -v sha1sum >/dev/null 2>&1; then
    sha1sum "$1" | awk '{ print $1 }'
  else
    shasum -a1 "$1" | awk '{ print $1 }'
  fi
}
# Create certificate pairs for the cluster.
# $1: The public IP for the master.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
# KUBE_TEMP
# MASTER_NAME
#
# Vars set:
# CERT_DIR
# CA_CERT_BASE64
# MASTER_CERT_BASE64
# MASTER_KEY_BASE64
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
# KUBECFG_CERT_BASE64
# KUBECFG_KEY_BASE64
# Create certificate pairs for the cluster (see the comment block above
# for the full contract).
#
# $1: primary CN for the CA; remaining args: extra IPs to add as SANs.
function create-certs {
  local -r primary_cn="${1}"

  # Determine extra certificate names for master: the first usable IP of
  # the service range (last octet + 1) must appear in the SANs.
  local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
  local sans=""
  # Quote "$@" so arguments are never word-split or glob-expanded
  # (shellcheck SC2068). Note this iterates over ALL args, including $1.
  for extra in "$@"; do
    if [[ -n "${extra}" ]]; then
      sans="${sans}IP:${extra},"
    fi
  done
  sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
  echo "Generating certs for alternate-names: ${sans}"
  setup-easyrsa
  PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs
  AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs
  # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
  # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
  # Feed base64 via input redirection instead of 'cat file |' — identical
  # output, one fewer process (shellcheck SC2002).
  CA_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/ca.key" | tr -d '\r\n')
  CA_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/ca.crt" | tr -d '\r\n')
  MASTER_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | tr -d '\r\n')
  MASTER_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/${MASTER_NAME}.key" | tr -d '\r\n')
  KUBELET_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kubelet.crt" | tr -d '\r\n')
  KUBELET_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kubelet.key" | tr -d '\r\n')
  KUBECFG_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kubecfg.crt" | tr -d '\r\n')
  KUBECFG_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kubecfg.key" | tr -d '\r\n')
  KUBEAPISERVER_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kube-apiserver.crt" | tr -d '\r\n')
  KUBEAPISERVER_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kube-apiserver.key" | tr -d '\r\n')
  # Setting up an addition directory (beyond pki) as it is the simplest way to
  # ensure we get a different CA pair to sign the proxy-client certs and which
  # we can send CA public key to the user-apiserver to validate communication.
  AGGREGATOR_CA_KEY_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | tr -d '\r\n')
  REQUESTHEADER_CA_CERT_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/ca.crt" | tr -d '\r\n')
  PROXY_CLIENT_CERT_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | tr -d '\r\n')
  PROXY_CLIENT_KEY_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | tr -d '\r\n')
}
# Set up easy-rsa directory structure.
#
# Assumed vars
# KUBE_TEMP
#
# Vars set:
# CERT_DIR
# AGGREGATOR_CERT_DIR
# Download easy-rsa and lay out three working copies: easyrsa3 (cluster CA),
# kubelet, and aggregator. Sets CERT_DIR and AGGREGATOR_CERT_DIR; exits 2
# (after dumping the captured debug log) if the layout is incomplete.
function setup-easyrsa {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # The subshell's output is captured for post-mortem only; '|| true' defers
  # failure handling to the executable check below.
  (set -x
    cd "${KUBE_TEMP}"
    curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
    tar xzf easy-rsa.tar.gz
    mkdir easy-rsa-master/kubelet
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
    mkdir easy-rsa-master/aggregator
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>"${cert_create_debug_output}" || true
  CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
  AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
  # [[ ... || ... ]] replaces the obsolescent '[ -x ... -o -x ... ]' form.
  if [[ ! -x "${CERT_DIR}/easyrsa" || ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]]; then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to setup easy-rsa: Aborting ===" >&2
    exit 2
  fi
}
# Runs the easy RSA commands to generate certificate files.
# The generated files are IN ${CERT_DIR}
#
# Assumed vars
# KUBE_TEMP
# MASTER_NAME
# CERT_DIR
# PRIMARY_CN: Primary canonical name
# SANS: Subject alternate names
#
#
# Runs the easy RSA commands to generate master certificate files.
# The generated files are in ${CERT_DIR}; exits 2 (after dumping the
# captured debug log) if any expected output file is missing or empty.
#
# Assumed vars: KUBE_TEMP, MASTER_NAME, CERT_DIR, PRIMARY_CN, SANS
function generate-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # All cert generation runs in a subshell whose output is captured for
  # post-mortem; failures are detected by the output-file check below.
  (set -x
    cd "${CERT_DIR}"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass
    ./easyrsa build-client-full kube-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the kubelet client cert with the correct groups
    echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet
    mv "kubelet-key.pem" "pki/private/kubelet.key"
    mv "kubelet.pem" "pki/issued/kubelet.crt"
    rm -f "kubelet.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=kubecfg --req-org=system:masters \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${CERT_DIR}/pki/private/ca.key" \
    "${CERT_DIR}/pki/ca.crt" \
    "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" \
    "${CERT_DIR}/pki/private/${MASTER_NAME}.key" \
    "${CERT_DIR}/pki/issued/kubelet.crt" \
    "${CERT_DIR}/pki/private/kubelet.key" \
    "${CERT_DIR}/pki/issued/kubecfg.crt" \
    "${CERT_DIR}/pki/private/kubecfg.key" \
    "${CERT_DIR}/pki/issued/kube-apiserver.crt" \
    "${CERT_DIR}/pki/private/kube-apiserver.key"
  do
    # -s: file must exist AND be non-empty.
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  # Inside (( )) the variable is referenced without '$' (shellcheck SC2004).
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate master certificates: Aborting ===" >&2
    exit 2
  fi
}
# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are in ${AGGREGATOR_CERT_DIR}
#
# Assumed vars
# KUBE_TEMP
# AGGREGATOR_MASTER_NAME
# AGGREGATOR_CERT_DIR
# AGGREGATOR_PRIMARY_CN: Primary canonical name
# AGGREGATOR_SANS: Subject alternate names
#
#
# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are in ${AGGREGATOR_CERT_DIR}; exits 2 (after dumping
# the captured debug log) if any expected output file is missing or empty.
#
# Assumed vars: KUBE_TEMP, AGGREGATOR_MASTER_NAME, AGGREGATOR_CERT_DIR,
#               AGGREGATOR_PRIMARY_CN, AGGREGATOR_SANS
function generate-aggregator-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # All cert generation runs in a subshell whose output is captured for
  # post-mortem; failures are detected by the output-file check below.
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/aggregator"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass
    ./easyrsa build-client-full aggregator-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the aggregator client cert with the correct groups
    echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client
    mv "proxy-client-key.pem" "pki/private/proxy-client.key"
    mv "proxy-client.pem" "pki/issued/proxy-client.crt"
    rm -f "proxy-client.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=proxy-clientcfg --req-org=system:aggregator \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" \
    "${AGGREGATOR_CERT_DIR}/pki/ca.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key"
  do
    # -s: file must exist AND be non-empty.
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  # Inside (( )) the variable is referenced without '$' (shellcheck SC2004).
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate aggregator certificates: Aborting ===" >&2
    exit 2
  fi
}
#
# Using provided master env, extracts value from provided key.
#
# Args:
# $1 master env (kube-env of master; result of calling get-master-env)
# $2 env key to use
# Using provided master env, extracts value from provided key.
#
# Args:
#   $1 master env (kube-env of master; result of calling get-master-env)
#   $2 env key to use
# Prints the key's single-quoted value with the quotes stripped, or an
# empty string when the key is absent.
function get-env-val() {
  local match
  match=$( (echo "${1}" | grep -E "^${2}:") || echo "")
  if [[ -z ${match} ]]; then
    echo ""
    # Fix: return here. Previously control fell through and the pipeline
    # below emitted a second empty line for missing keys.
    return
  fi
  echo ${match} | cut -d : -f 2 | cut -d \' -f 2
}
# Load the master env by calling get-master-env, and extract important values
# Load the master env by calling get-master-env, and extract important values
# into the corresponding global variables (tokens, cert/key material, and
# ENABLE_LEGACY_ABAC).
function parse-master-env() {
  # Get required master env vars. Declare and assign separately so the exit
  # status of get-master-env is not masked by 'local' (shellcheck SC2155).
  local master_env
  master_env=$(get-master-env)
  KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
  NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
  CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")
  CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY")
  KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT")
  KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY")
  EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS")
  KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT")
  KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY")
  MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT")
  MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY")
  AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY")
  REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT")
  PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT")
  PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY")
  ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC")
}
# Update or verify required gcloud components are installed
# at minimum required version.
# Assumed vars
# KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
# If the gcloud install directory is not writable by the current user,
# run component commands via sudo.
local sudo_prefix=""
if [ ! -w $(dirname `which gcloud`) ]; then
sudo_prefix="sudo"
fi
# update and install components as needed
# KUBE_PROMPT_FOR_UPDATE == "y": actively install/update the alpha and
# beta components; otherwise just verify versions via the embedded
# python program below.
if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha
${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
${sudo_prefix} gcloud ${gcloud_prompt:-} components update
else
local version=$(gcloud version --format=json)
# The single-quoted argument below is a python program fed to `python -c`;
# it exits non-zero when the SDK is older than 1.3.0 or a required
# component (alpha/beta/core) is missing.
# NOTE(review): requires a `python` binary on PATH — confirm for
# python3-only hosts.
python -c'
import json,sys
from distutils import version

minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if "CL @" in rel:
print("Using dev version of gcloud: %s" %rel)
exit(0)
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
print("gcloud version out of date ( < %s )" % minVersion)
exit(1)
missing = []
for c in required:
if not data.get(c):
missing += [c]
if missing:
for c in missing:
print ("missing required gcloud component \"{0}\"".format(c))
exit(1)
' """${version}"""
fi
}
# Robustly try to create a static ip.
# $1: The name of the ip to create
# $2: The name of the region to create the ip in.
# Robustly try to create a static ip.
# $1: The name of the ip to create
# $2: The name of the region to create the ip in.
function create-static-ip() {
detect-project
local attempt=0
local REGION="$2"
while true; do
if gcloud compute addresses create "$1" \
--project "${PROJECT}" \
--region "${REGION}" -q > /dev/null; then
# successful operation - wait until it's visible
start="$(date +%s)"
while true; do
now="$(date +%s)"
# Timeout set to 15 minutes
if [[ $((now - start)) -gt 900 ]]; then
echo "Timeout while waiting for master IP visibility"
exit 2
fi
# Poll every 5s until 'describe' confirms the address exists.
if gcloud compute addresses describe "$1" --project "${PROJECT}" --region "${REGION}" >/dev/null 2>&1; then
break
fi
echo "Master IP not visible yet. Waiting..."
sleep 5
done
break
fi
# The create call failed; if the address already exists (e.g. created by
# a previous run or a racing create), the postcondition is satisfied.
if gcloud compute addresses describe "$1" \
--project "${PROJECT}" \
--region "${REGION}" >/dev/null 2>&1; then
# it exists - postcondition satisfied
break
fi
# Otherwise retry with linear backoff (5s, 10s, ...); give up after
# attempt exceeds 4.
if (( attempt > 4 )); then
echo -e "${color_red}Failed to create static ip $1 ${color_norm}" >&2
exit 2
fi
attempt=$(($attempt+1))
echo -e "${color_yellow}Attempt $attempt failed to create static ip $1. Retrying.${color_norm}" >&2
sleep $(($attempt * 5))
done
}
# Robustly try to create a firewall rule.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
# Robustly try to create a firewall rule, retrying with linear backoff.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule() {
  detect-project
  local attempt=0
  while true; do
    # Success ends the loop immediately.
    if gcloud compute firewall-rules create "$1" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "$2" \
      --target-tags "$3" \
      --allow tcp,udp,icmp,esp,ah,sctp; then
      break
    fi
    # Give up once the attempt counter has passed 4.
    if (( attempt > 4 )); then
      echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}" >&2
      exit 2
    fi
    echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}" >&2
    attempt=$(($attempt+1))
    # Linear backoff: 5s, 10s, 15s, ...
    sleep $(($attempt * 5))
  done
}
# Format the string argument for gcloud network.
# Format the string argument for gcloud network.
#
# $1: network project   $2: region      $3: network
# $4: subnet            $5: address (optional)
# $6: enable_ip_alias (optional)        $7: alias_size (optional)
# Prints the assembled flag string on stdout.
function make-gcloud-network-argument() {
  local -r project="$1"
  local -r region="$2"
  local -r network_name="$3"
  local -r subnet_name="$4"
  local -r address="$5"       # optional
  local -r use_ip_alias="$6"  # optional
  local -r alias_size="$7"    # optional

  local -r net_url="projects/${project}/global/networks/${network_name}"
  local -r subnet_url="projects/${project}/regions/${region}/subnetworks/${subnet_name:-}"

  local args=""
  if [[ "${use_ip_alias}" == 'true' ]]; then
    args="--network-interface network=${net_url}"
    # If address is omitted, instance will not receive an external IP.
    args+=",address=${address:-}"
    args+=",subnet=${subnet_url}"
    args+=",aliases=pods-default:${alias_size}"
    args+=" --no-can-ip-forward"
  else
    if [[ -n ${subnet_name:-} ]]; then
      args+=" --subnet ${subnet_url}"
    else
      args+=" --network ${net_url}"
    fi
    args+=" --can-ip-forward"
    if [[ -n ${address:-} ]]; then
      args+=" --address ${address}"
    fi
  fi
  echo "${args}"
}
# $1: version (required)
# Derive an instance-template name from a version string ($1, required).
# GCE resource names are limited to 63 chars of [a-z0-9-], so the composed
# name is truncated to 63 chars, '.'/'+' become '-', and trailing dashes
# are stripped.
function get-template-name-from-version() {
  local -r full_name="${NODE_INSTANCE_PREFIX}-template-${1}"
  echo "${full_name}" | cut -c 1-63 | sed 's/[\.\+]/-/g;s/-*$//g'
}
# validates the NODE_LOCAL_SSDS_EXT variable
# validates the NODE_LOCAL_SSDS_EXT variable
# Exits 2 on any malformed group. Also accumulates into the (global)
# local_ssd_ext_count, which the caller initializes before the loop.
function validate-node-local-ssds-ext(){
# NOTE(review): the caller passes "${ssdopts}", which expands to element 0
# only. Indices 1 and 2 remain visible here ONLY because this assignment
# reuses the caller's non-local 'ssdopts' array and overwrites just index
# 0 — confirm before making this variable local or renaming it.
ssdopts="${1}"

# All three fields (count, interface, filesystem type) must be present.
if [[ -z "${ssdopts[0]}" || -z "${ssdopts[1]}" || -z "${ssdopts[2]}" ]]; then
echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2
exit 2
fi
if [[ "${ssdopts[1]}" != "scsi" && "${ssdopts[1]}" != "nvme" ]]; then
echo -e "${color_red}Local SSD: Interface must be scsi or nvme, found: ${ssdopts[1]} ${color_norm}" >&2
exit 2
fi
if [[ "${ssdopts[2]}" != "fs" && "${ssdopts[2]}" != "block" ]]; then
echo -e "${color_red}Local SSD: Filesystem type must be fs or block, found: ${ssdopts[2]} ${color_norm}" >&2
exit 2
fi
# Running total across all groups must stay within [1, GCE_MAX_LOCAL_SSD].
local_ssd_ext_count=$((local_ssd_ext_count+ssdopts[0]))
if [[ "${local_ssd_ext_count}" -gt "${GCE_MAX_LOCAL_SSD}" || "${local_ssd_ext_count}" -lt 1 ]]; then
echo -e "${color_red}Local SSD: Total number of local ssds must range from 1 to 8, found: ${local_ssd_ext_count} ${color_norm}" >&2
exit 2
fi
}
# Robustly try to create an instance template.
# $1: The name of the instance template.
# $2: The scopes flag.
# $3: String of comma-separated metadata entries (must all be from a file).
#
# Deletes any pre-existing template of the same name, then retries creation
# up to 5 times with linear backoff. Exits 2 on unrecoverable failure.
function create-node-template() {
  detect-project
  detect-subnetworks
  local template_name="$1"
  # First, ensure the template doesn't exist.
  # TODO(zmerlynn): To make this really robust, we need to parse the output and
  #                 add retries. Just relying on a non-zero exit code doesn't
  #                 distinguish an ephemeral failed call from a "not-exists".
  if gcloud compute instance-templates describe "$template_name" --project "${PROJECT}" &>/dev/null; then
    echo "Instance template ${1} already exists; deleting." >&2
    if ! gcloud compute instance-templates delete "$template_name" --project "${PROJECT}" --quiet &>/dev/null; then
      echo -e "${color_yellow}Failed to delete existing instance template${color_norm}" >&2
      exit 2
    fi
  fi
  local gcloud="gcloud"
  local accelerator_args=""
  # VMs with Accelerators cannot be live migrated.
  # More details here - https://cloud.google.com/compute/docs/gpus/add-gpus#create-new-gpu-instance
  if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
    accelerator_args="--maintenance-policy TERMINATE --restart-on-failure --accelerator ${NODE_ACCELERATORS}"
    # Accelerator flags require the beta gcloud surface.
    gcloud="gcloud beta"
  fi
  local preemptible_minions=""
  if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
    preemptible_minions="--preemptible --maintenance-policy TERMINATE"
  fi
  local local_ssds=""
  local_ssd_ext_count=0
  # NODE_LOCAL_SSDS_EXT is a ';'-separated list of "<count>,<interface>,<format>"
  # groups, e.g. "2,scsi,fs;1,nvme,block".
  if [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]]; then
    IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
    for ssdgroup in "${ssdgroups[@]}"
    do
      IFS="," read -r -a ssdopts <<< "${ssdgroup}"
      # Only the first field is passed; the validator reads the rest from the
      # global ssdopts array populated by the `read` above.
      validate-node-local-ssds-ext "${ssdopts}"
      for i in $(seq ${ssdopts[0]}); do
        local_ssds="$local_ssds--local-ssd=interface=${ssdopts[1]} "
      done
    done
  fi
  if [[ ! -z ${NODE_LOCAL_SSDS+x} ]]; then
    # The NODE_LOCAL_SSDS check below fixes issue #49171
    # Some versions of seq will count down from 1 if "seq 0" is specified
    if [[ ${NODE_LOCAL_SSDS} -ge 1 ]]; then
      for i in $(seq ${NODE_LOCAL_SSDS}); do
        local_ssds="$local_ssds--local-ssd=interface=SCSI "
      done
    fi
  fi
  local network=$(make-gcloud-network-argument \
    "${NETWORK_PROJECT}" \
    "${REGION}" \
    "${NETWORK}" \
    "${SUBNETWORK:-}" \
    "" \
    "${ENABLE_IP_ALIASES:-}" \
    "${IP_ALIAS_SIZE:-}")
  local attempt=1
  while true; do
    echo "Attempt ${attempt} to create ${1}" >&2
    # ${accelerator_args}, ${local_ssds}, ${network}, ${preemptible_minions},
    # $2 and $3 are intentionally unquoted: each may expand to multiple flags.
    if ! ${gcloud} compute instance-templates create \
      "$template_name" \
      --project "${PROJECT}" \
      --machine-type "${NODE_SIZE}" \
      --boot-disk-type "${NODE_DISK_TYPE}" \
      --boot-disk-size "${NODE_DISK_SIZE}" \
      --image-project="${NODE_IMAGE_PROJECT}" \
      --image "${NODE_IMAGE}" \
      --service-account "${NODE_SERVICE_ACCOUNT}" \
      --tags "${NODE_TAG}" \
      ${accelerator_args} \
      ${local_ssds} \
      --region "${REGION}" \
      ${network} \
      ${preemptible_minions} \
      $2 \
      --metadata-from-file $3 >&2; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
        exit 2
      fi
      echo -e "${color_yellow}Attempt ${attempt} failed to create instance template $template_name. Retrying.${color_norm}" >&2
      attempt=$(($attempt+1))
      # Linear backoff before retrying.
      sleep $(($attempt * 5))
      # In case the previous attempt failed with something like a
      # Backend Error and left the entry laying around, delete it
      # before we try again.
      gcloud compute instance-templates delete "$template_name" --project "${PROJECT}" &>/dev/null || true
    else
      break
    fi
  done
}
# Instantiate a kubernetes cluster
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
#
# Three modes, selected by env vars:
#   KUBE_USE_EXISTING_MASTER=true       -> only (re)create node resources.
#   KUBE_REPLICATE_EXISTING_MASTER=true -> add an HA master replica.
#   otherwise                           -> create a full cluster from scratch.
function kube-up() {
  kube::util::ensure-temp-dir
  detect-project
  load-or-gen-kube-basicauth
  load-or-gen-kube-bearertoken
  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars
  # ensure that environmental variables specifying number of migs to create
  set_num_migs
  if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
    # Reuse the running master; only node-side resources are touched.
    detect-master
    parse-master-env
    create-subnetworks
    detect-subnetworks
    create-nodes
  elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
    if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
      echo "Master replication supported only for gci and ubuntu"
      return 1
    fi
    create-loadbalancer
    # If replication of master fails, we need to ensure that the replica is removed from etcd clusters.
    if ! replicate-master; then
      remove-replica-from-etcd 2379 || true
      remove-replica-from-etcd 4002 || true
    fi
  else
    # Fresh cluster: network, master, nodes, then wait for the API to answer.
    check-existing
    create-network
    create-subnetworks
    detect-subnetworks
    write-cluster-location
    write-cluster-name
    create-autoscaler-config
    create-master
    create-nodes-firewall
    create-nodes-template
    create-nodes
    check-cluster
  fi
}
# Checks for leftover cluster resources and, when appropriate, tears them
# down via kube-down. Prompts interactively on a tty unless
# KUBE_UP_AUTOMATIC_CLEANUP forces automatic cleanup.
function check-existing() {
  # May be false if tty is not allocated (for example with ssh -T).
  local have_tty=false
  if [[ -t 1 ]]; then
    have_tty=true
  fi

  # Without a tty and without automatic cleanup there is nothing we can do.
  if [[ ${have_tty} != "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} != "true" ]]; then
    return
  fi

  # No leftovers -> nothing to clean.
  if check-resources; then
    return
  fi

  local run_kube_down="n"
  echo "${KUBE_RESOURCE_FOUND} found." >&2
  # Get user input only if running in terminal.
  if [[ ${have_tty} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then
    read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
  fi
  if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
    echo "... calling kube-down" >&2
    kube-down
  fi
}
# Echoes the subnet mode ("AUTO"/"CUSTOM"/...) of ${NETWORK}, or nothing when
# the lookup fails ("|| true" keeps errexit from aborting the caller).
function check-network-mode() {
  local mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_subnet_mode)' || true)"
  # The deprecated field uses lower case. Convert to upper case for consistency.
  # BUGFIX: quote the tr sets — unquoted [a-z]/[A-Z] are globs and would be
  # expanded by the shell if a matching single-character file exists in the
  # current directory. The redundant nested echo/$() wrapper is dropped too.
  echo "${mode}" | tr '[:lower:]' '[:upper:]'
}
# Creates the cluster network if absent (auto or custom subnet mode per
# CREATE_CUSTOM_NETWORK), or records PREEXISTING_NETWORK[_MODE] when reusing
# one. Then ensures the baseline firewall rules exist: internal master/node
# traffic from 10.0.0.0/8 and external ssh.
#
# Firewall rules are created in the background (&); callers must wait for
# background jobs before depending on them.
function create-network() {
  if ! gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" &>/dev/null; then
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    local network_mode="auto"
    if [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
      network_mode="custom"
    fi
    echo "Creating new ${network_mode} network: ${NETWORK}"
    gcloud compute networks create --project "${NETWORK_PROJECT}" "${NETWORK}" --subnet-mode="${network_mode}"
  else
    PREEXISTING_NETWORK=true
    PREEXISTING_NETWORK_MODE="$(check-network-mode)"
    echo "Found existing network ${NETWORK} in ${PREEXISTING_NETWORK_MODE} mode."
  fi
  # Ports 2380-2381 (etcd peering) are deliberately excluded here; they are
  # opened master-to-master by the ${MASTER_NAME}-etcd rule in create-master.
  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-master" &>/dev/null; then
    gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-master" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" \
      --target-tags "${MASTER_TAG}"&
  fi
  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-node" &>/dev/null; then
    gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-node" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-65535,udp:1-65535,icmp" \
      --target-tags "${NODE_TAG}"&
  fi
  if ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "0.0.0.0/0" \
      --allow "tcp:22" &
  fi
}
# Switches ${NETWORK} to custom subnet mode and expands the region's subnet
# to a /19 so large clusters have enough node IPs.
function expand-default-subnetwork() {
  # "|| true": the mode switch fails harmlessly when the network is already
  # in custom mode; only the expansion below is required to succeed.
  gcloud compute networks update "${NETWORK}" \
    --switch-to-custom-subnet-mode \
    --project "${NETWORK_PROJECT}" \
    --quiet || true
  gcloud compute networks subnets expand-ip-range "${NETWORK}" \
    --region="${REGION}" \
    --project "${NETWORK_PROJECT}" \
    --prefix-length=19 \
    --quiet
}
# Creates or validates subnetworks according to ENABLE_IP_ALIASES:
#   false -> optionally expand the default subnets (big clusters) or create
#            the custom-mode subnet, then return.
#   true  -> ensure IP_ALIAS_SUBNETWORK exists with secondary ranges for
#            pods and services, creating it when missing.
# Exits 1 on an invalid ENABLE_IP_ALIASES value or an alias subnet without a
# secondary range.
function create-subnetworks() {
  case ${ENABLE_IP_ALIASES} in
    true) echo "IP aliases are enabled. Creating subnetworks.";;
    false)
      echo "IP aliases are disabled."
      if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
        if [[ "${PREEXISTING_NETWORK}" != "true" ]]; then
          expand-default-subnetwork
        else
          echo "${color_yellow}Using pre-existing network ${NETWORK}, subnets won't be expanded to /19!${color_norm}"
        fi
      elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" && "${PREEXISTING_NETWORK}" != "true" ]]; then
        gcloud compute networks subnets create "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region "${REGION}" --network "${NETWORK}" --range "${NODE_IP_RANGE}"
      fi
      return;;
    *) echo "${color_red}Invalid argument to ENABLE_IP_ALIASES${color_norm}"
      exit 1;;
  esac
  # Look for the alias subnet, it must exist and have a secondary
  # range configured.
  local subnet=$(gcloud compute networks subnets describe \
    --project "${NETWORK_PROJECT}" \
    --region ${REGION} \
    ${IP_ALIAS_SUBNETWORK} 2>/dev/null)
  if [[ -z ${subnet} ]]; then
    echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
    gcloud compute networks subnets create \
      ${IP_ALIAS_SUBNETWORK} \
      --description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \
      --project "${NETWORK_PROJECT}" \
      --network ${NETWORK} \
      --region ${REGION} \
      --range ${NODE_IP_RANGE} \
      --secondary-range "pods-default=${CLUSTER_IP_RANGE}" \
      --secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}"
    echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
  else
    # Pre-existing alias subnet: it must already carry a secondary range.
    if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
      echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
      exit 1
    fi
  fi
}
# detect-subnetworks sets the SUBNETWORK var if not already set
# Assumed vars:
#   NETWORK
#   REGION
#   NETWORK_PROJECT
#
# Optional vars:
#   SUBNETWORK
#   IP_ALIAS_SUBNETWORK
#
# Resolution order: explicit SUBNETWORK > IP_ALIAS_SUBNETWORK > first subnet
# GCE reports for NETWORK in REGION. Prints an error (but does not fail)
# when nothing is found.
function detect-subnetworks() {
  # 1) Respect an explicitly configured subnetwork.
  if [[ -n ${SUBNETWORK:-} ]]; then
    echo "Using subnet ${SUBNETWORK}"
    return 0
  fi
  # 2) Fall back to the IP-alias subnetwork when one is configured.
  if [[ -n ${IP_ALIAS_SUBNETWORK:-} ]]; then
    SUBNETWORK=${IP_ALIAS_SUBNETWORK}
    echo "Using IP Alias subnet ${SUBNETWORK}"
    return 0
  fi
  # 3) Probe GCE for any subnet of NETWORK in this region.
  SUBNETWORK=$(gcloud compute networks subnets list \
    --network=${NETWORK} \
    --regions=${REGION} \
    --project=${NETWORK_PROJECT} \
    --limit=1 \
    --format='value(name)' 2>/dev/null)
  if [[ -n ${SUBNETWORK:-} ]]; then
    echo "Found subnet for region ${REGION} in network ${NETWORK}: ${SUBNETWORK}"
    return 0
  fi
  echo "${color_red}Could not find subnetwork with region ${REGION}, network ${NETWORK}, and project ${NETWORK_PROJECT}"
}
# Deletes every firewall rule attached to ${NETWORK} via delete-firewall-rules.
# Logs (without failing) when the listing itself fails.
function delete-all-firewall-rules() {
  if fws=$(gcloud compute firewall-rules list --project "${NETWORK_PROJECT}" --filter="network=${NETWORK}" --format="value(name)"); then
    echo "Deleting firewall rules remaining in network ${NETWORK}: ${fws}"
    # fws is one newline-separated string; the callee splits it on whitespace.
    delete-firewall-rules "$fws"
  else
    echo "Failed to list firewall rules from the network ${NETWORK}"
  fi
}
# Deletes the given firewall rules (when they still exist), in parallel.
# $@: firewall rule names, possibly as one whitespace/newline-separated string.
function delete-firewall-rules() {
  # Intentionally unquoted: callers pass a single newline-separated string and
  # we rely on word splitting to iterate the individual rule names.
  for fw in $@; do
    # Only attempt deletion when the rule is still present.
    if [[ -n $(gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${fw}" --format='value(name)' 2>/dev/null || true) ]]; then
      gcloud compute firewall-rules delete --project "${NETWORK_PROJECT}" --quiet "${fw}" &
    fi
  done
  # Deletions run in the background; wait for all of them (log-only on failure).
  kube::util::wait-for-jobs || {
    echo -e "${color_red}Failed to delete firewall rules.${color_norm}" >&2
  }
}
# Deletes ${NETWORK} if it still exists. On failure (typically because
# firewall rules still reference it) lists the remaining rules and returns 1.
function delete-network() {
  local existing
  existing=$(gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" --format='value(name)' 2>/dev/null || true)
  # Already gone — nothing to do.
  if [[ -z ${existing} ]]; then
    return 0
  fi
  if ! gcloud compute networks delete --project "${NETWORK_PROJECT}" --quiet "${NETWORK}"; then
    echo "Failed to delete network '${NETWORK}'. Listing firewall-rules:"
    gcloud compute firewall-rules --project "${NETWORK_PROJECT}" list --filter="network=${NETWORK}"
    return 1
  fi
}
# Deletes subnetworks created for the cluster:
#   - custom-mode network: remove the big-cluster default subnets or the
#     custom subnet we created, then return;
#   - auto-mode with IP aliases: remove the auto-created alias subnet, unless
#     the user supplied their own via KUBE_GCE_IP_ALIAS_SUBNETWORK.
function delete-subnetworks() {
  # If running in custom mode network we need to delete subnets manually.
  mode="$(check-network-mode)"
  if [[ "${mode}" == "CUSTOM" ]]; then
    if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
      echo "Deleting default subnets..."
      # This value should be kept in sync with number of regions.
      local parallelism=9
      # One delete per region, run ${parallelism}-wide; "|| true" tolerates
      # already-deleted subnets.
      gcloud compute networks subnets list --network="${NETWORK}" --project "${NETWORK_PROJECT}" --format='value(region.basename())' | \
        xargs -i -P ${parallelism} gcloud --quiet compute networks subnets delete "${NETWORK}" --project "${NETWORK_PROJECT}" --region="{}" || true
    elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
      echo "Deleting custom subnet..."
      gcloud --quiet compute networks subnets delete "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region="${REGION}" || true
    fi
    return
  fi
  # If we reached here, it means we're not using custom network.
  # So the only thing we need to check is if IP-aliases was turned
  # on and we created a subnet for it. If so, we should delete it.
  if [[ ${ENABLE_IP_ALIASES:-} == "true" ]]; then
    # Only delete the subnet if we created it (i.e it's not pre-existing).
    if [[ -z "${KUBE_GCE_IP_ALIAS_SUBNETWORK:-}" ]]; then
      echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
      if [[ -n $(gcloud compute networks subnets describe \
            --project "${NETWORK_PROJECT}" \
            --region ${REGION} \
            ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
        gcloud --quiet compute networks subnets delete \
          --project "${NETWORK_PROJECT}" \
          --region ${REGION} \
          ${IP_ALIAS_SUBNETWORK}
      fi
    fi
  fi
}
# Generates SSL certificates for etcd cluster. Uses cfssl program.
#
# Assumed vars:
#   KUBE_TEMP: temporary directory
#   NUM_NODES: #nodes in the cluster
#
# Args:
#  $1: host name
#  $2: CA certificate
#  $3: CA key
#
# If CA cert/key is empty, the function will also generate certs for CA.
#
# Vars set:
#   ETCD_CA_KEY_BASE64
#   ETCD_CA_CERT_BASE64
#   ETCD_PEER_KEY_BASE64
#   ETCD_PEER_CERT_BASE64
#
function create-etcd-certs {
  local host=${1}
  local ca_cert=${2:-}
  local ca_key=${3:-}
  GEN_ETCD_CA_CERT="${ca_cert}" GEN_ETCD_CA_KEY="${ca_key}" \
    generate-etcd-cert "${KUBE_TEMP}/cfssl" "${host}" "peer" "peer"
  pushd "${KUBE_TEMP}/cfssl"
  # Read the PEM files by redirection instead of the former `cat file | ...`
  # pipelines (useless use of cat); keys are base64'd as-is, certs are
  # gzipped first. tr strips newlines so the values are env-var safe.
  ETCD_CA_KEY_BASE64=$(base64 < "ca-key.pem" | tr -d '\r\n')
  ETCD_CA_CERT_BASE64=$(gzip < "ca.pem" | base64 | tr -d '\r\n')
  ETCD_PEER_KEY_BASE64=$(base64 < "peer-key.pem" | tr -d '\r\n')
  ETCD_PEER_CERT_BASE64=$(gzip < "peer.pem" | base64 | tr -d '\r\n')
  popd
}
# Creates the master VM and its supporting resources: https/etcd firewall
# rules, persistent disk, reserved static IP, cluster certs and tokens.
# For clusters with >= 50 nodes the master instance is created synchronously;
# otherwise it starts in the background.
#
# Vars set: KUBE_PROXY_TOKEN, NODE_PROBLEM_DETECTOR_TOKEN (maybe),
#   MASTER_RESERVED_IP, KUBERNETES_MASTER_NAME, MASTER_ADVERTISE_ADDRESS,
#   KUBELET_APISERVER (maybe).
function create-master() {
  echo "Starting master and configuring firewalls"
  # Backgrounded: the https rule is not needed before the VM is reachable.
  gcloud compute firewall-rules create "${MASTER_NAME}-https" \
    --project "${NETWORK_PROJECT}" \
    --network "${NETWORK}" \
    --target-tags "${MASTER_TAG}" \
    --allow tcp:443 &
  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${MASTER_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"
  # Create rule for accessing and securing etcd servers.
  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${MASTER_NAME}-etcd" &>/dev/null; then
    gcloud compute firewall-rules create "${MASTER_NAME}-etcd" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-tags "${MASTER_TAG}" \
      --allow "tcp:2380,tcp:2381" \
      --target-tags "${MASTER_TAG}" &
  fi
  # Generate a bearer token for this cluster. We push this separately
  # from the other cluster variables so that the client (this
  # computer) can forget it later. This should disappear with
  # http://issue.k8s.io/3168
  # (dd trims the base64'd random bytes to a 32-character token.)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  fi
  # Reserve the master's IP so that it can later be transferred to another VM
  # without disrupting the kubelets.
  create-static-ip "${MASTER_NAME}-ip" "${REGION}"
  MASTER_RESERVED_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
    KUBELET_APISERVER="${MASTER_RESERVED_IP}"
  fi
  KUBERNETES_MASTER_NAME="${MASTER_RESERVED_IP}"
  MASTER_ADVERTISE_ADDRESS="${MASTER_RESERVED_IP}"
  create-certs "${MASTER_RESERVED_IP}"
  create-etcd-certs ${MASTER_NAME}
  if [[ "${NUM_NODES}" -ge "50" ]]; then
    # We block on master creation for large clusters to avoid doing too much
    # unnecessary work in case master start-up fails (like creation of nodes).
    create-master-instance "${MASTER_RESERVED_IP}"
  else
    create-master-instance "${MASTER_RESERVED_IP}" &
  fi
}
# Adds master replica to etcd cluster.
#
# Assumed vars:
#   REPLICA_NAME
#   PROJECT
#   EXISTING_MASTER_NAME
#   EXISTING_MASTER_ZONE
#
# $1: etcd client port
# $2: etcd internal port
# returns the result of ssh command which adds replica
function add-replica-to-etcd() {
  local -r client_port="${1}"
  local -r internal_port="${2}"
  # POST the new replica's peer URL to the etcd members API on the existing
  # master. The ssh command's exit status is the function's return value
  # (bash returns the last command's status implicitly).
  gcloud compute ssh "${EXISTING_MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${EXISTING_MASTER_ZONE}" \
    --command \
    "curl localhost:${client_port}/v2/members -XPOST -H \"Content-Type: application/json\" -d '{\"peerURLs\":[\"https://${REPLICA_NAME}:${internal_port}\"]}' -s"
}
# Sets EXISTING_MASTER_NAME and EXISTING_MASTER_ZONE variables.
#
# Assumed vars:
#   PROJECT
#
# NOTE: Must be in sync with get-replica-name-regexp
function set-existing-master() {
  # Pick the first instance whose name matches the replica-name pattern;
  # the output is one tab-separated "name<TAB>zone" line.
  local existing_master
  existing_master=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter "name ~ '$(get-replica-name-regexp)'" \
    --format "value(name,zone)" | head -n1)
  EXISTING_MASTER_NAME="$(cut -f1 <<< "${existing_master}")"
  EXISTING_MASTER_ZONE="$(cut -f2 <<< "${existing_master}")"
}
# Replicates the existing master into ${ZONE} as ${REPLICA_NAME}:
# registers the replica in both etcd clusters, creates its disk, clones the
# master instance, and adds it to the load balancer target pool.
# Returns 1 (without cleanup) when etcd registration fails; the caller
# (kube-up) is responsible for removing the replica from etcd.
function replicate-master() {
  set-replica-name
  set-existing-master
  echo "Experimental: replicating existing master ${EXISTING_MASTER_ZONE}/${EXISTING_MASTER_NAME} as ${ZONE}/${REPLICA_NAME}"
  # Before we do anything else, we should configure etcd to expect more replicas.
  if ! add-replica-to-etcd 2379 2380; then
    echo "Failed to add master replica to etcd cluster."
    return 1
  fi
  # Ports 4002/2381 belong to the events etcd cluster.
  if ! add-replica-to-etcd 4002 2381; then
    echo "Failed to add master replica to etcd events cluster."
    return 1
  fi
  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${REPLICA_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"
  local existing_master_replicas="$(get-all-replica-names)"
  replicate-master-instance "${EXISTING_MASTER_ZONE}" "${EXISTING_MASTER_NAME}" "${existing_master_replicas}"
  # Add new replica to the load balancer.
  gcloud compute target-pools add-instances "${MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --instances "${REPLICA_NAME}"
}
# Detaches old and attaches new external IP to a VM.
#
# Arguments:
#   $1 - VM name
#   $2 - VM zone
#   $3 - external static IP; if empty will use an ephemeral IP address.
function attach-external-ip() {
  local NAME=${1}
  local ZONE=${2}
  local IP_ADDR=${3:-}
  # Look up the current access-config name so the replacement keeps it.
  local ACCESS_CONFIG_NAME=$(gcloud compute instances describe "${NAME}" \
    --project "${PROJECT}" --zone "${ZONE}" \
    --format="value(networkInterfaces[0].accessConfigs[0].name)")
  # Drop the existing external IP...
  gcloud compute instances delete-access-config "${NAME}" \
    --project "${PROJECT}" --zone "${ZONE}" \
    --access-config-name "${ACCESS_CONFIG_NAME}"
  # ...then re-add one: ephemeral when no address given, else the static IP.
  if [[ -z ${IP_ADDR} ]]; then
    gcloud compute instances add-access-config "${NAME}" \
      --project "${PROJECT}" --zone "${ZONE}" \
      --access-config-name "${ACCESS_CONFIG_NAME}"
  else
    gcloud compute instances add-access-config "${NAME}" \
      --project "${PROJECT}" --zone "${ZONE}" \
      --access-config-name "${ACCESS_CONFIG_NAME}" \
      --address "${IP_ADDR}"
  fi
}
# Creates load balancer in front of apiserver if it doesn't exists already. Assumes there's only one
# existing master replica.
#
# Assumes:
#   PROJECT
#   MASTER_NAME
#   ZONE
#   REGION
function create-loadbalancer() {
  detect-master
  # Step 0: Return early if LB is already configured.
  if gcloud compute forwarding-rules describe ${MASTER_NAME} \
    --project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
    echo "Load balancer already exists"
    return
  fi
  local EXISTING_MASTER_NAME="$(get-all-replica-names)"
  local EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
    --project "${PROJECT}" --format="value(zone)")
  echo "Creating load balancer in front of an already existing master in ${EXISTING_MASTER_ZONE}"
  # Step 1: Detach master IP address and attach ephemeral address to the existing master
  attach-external-ip "${EXISTING_MASTER_NAME}" "${EXISTING_MASTER_ZONE}"
  # Step 2: Create target pool.
  gcloud compute target-pools create "${MASTER_NAME}" --project "${PROJECT}" --region "${REGION}"
  # TODO: We should also add master instances with suffixes
  gcloud compute target-pools add-instances "${MASTER_NAME}" --instances "${EXISTING_MASTER_NAME}" --project "${PROJECT}" --zone "${EXISTING_MASTER_ZONE}"
  # Step 3: Create forwarding rule.
  # TODO: This step can take up to 20 min. We need to speed this up...
  gcloud compute forwarding-rules create ${MASTER_NAME} \
    --project "${PROJECT}" --region ${REGION} \
    --target-pool ${MASTER_NAME} --address=${KUBE_MASTER_IP} --ports=443
  echo -n "Waiting for the load balancer configuration to propagate..."
  local counter=0
  # BUGFIX: probe with curl directly instead of the former `until $(curl ...)`,
  # which only worked by accident (the command substitution expanded to an
  # empty command whose status happened to be curl's). Each probe times out
  # after 1s (-m1), so 1800 failed probes approximates the 30-minute cap.
  until curl -k -m1 "https://${KUBE_MASTER_IP}" &> /dev/null; do
    counter=$((counter+1))
    echo -n .
    if [[ ${counter} -ge 1800 ]]; then
      echo -e "${color_red}TIMEOUT${color_norm}" >&2
      echo -e "${color_red}Load balancer failed to initialize within ${counter} seconds.${color_norm}" >&2
      exit 2
    fi
  done
  echo "DONE"
}
# Creates the single firewall rule covering all nodes, then waits for all
# background firewall jobs (including those started earlier by create-network
# and create-master) to complete.
function create-nodes-firewall() {
  # Create a single firewall rule for all minions.
  create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &
  # Report logging choice (if any).
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
    echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
  fi
  # Wait for last batch of jobs
  kube::util::wait-for-jobs || {
    echo -e "${color_red}Some commands failed.${color_norm}" >&2
  }
}
# Echoes the gcloud scopes flag derived from NODE_SCOPES:
# "--scopes <list>" when NODE_SCOPES is non-empty, "--no-scopes" otherwise.
function get-scope-flags() {
  if [[ -n "${NODE_SCOPES}" ]]; then
    echo "--scopes ${NODE_SCOPES}"
  else
    echo "--no-scopes"
  fi
}
# Writes the node environment and creates the node instance template.
function create-nodes-template() {
  echo "Creating nodes."
  # NOTE(review): scope_flags looks unused, but bash locals are *dynamically*
  # scoped — functions called below (write-node-env /
  # create-node-instance-template) can read it. Do not remove.
  local scope_flags=$(get-scope-flags)
  write-node-env
  local template_name="${NODE_INSTANCE_PREFIX}-template"
  create-node-instance-template $template_name
}
# Assumes:
# - MAX_INSTANCES_PER_MIG
# - NUM_NODES
# exports:
# - NUM_MIGS
#
# Computes how many managed instance groups are needed so that no MIG holds
# more than MAX_INSTANCES_PER_MIG nodes (default cap 1000).
function set_num_migs() {
  local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-1000}
  if [[ ${defaulted_max_instances_per_mig} -le "0" ]]; then
    # BUGFIX(message): the guard also fires on zero, so say so instead of the
    # old "cannot be negative", which was wrong for MAX_INSTANCES_PER_MIG=0.
    echo "MAX_INSTANCES_PER_MIG cannot be negative or zero. Assuming default 1000"
    defaulted_max_instances_per_mig=1000
  fi
  # Ceiling division: enough MIGs that no MIG exceeds the per-MIG cap.
  export NUM_MIGS=$(((${NUM_NODES} + ${defaulted_max_instances_per_mig} - 1) / ${defaulted_max_instances_per_mig}))
}
# Assumes:
# - NUM_MIGS
# - NODE_INSTANCE_PREFIX
# - NUM_NODES
# - PROJECT
# - ZONE
#
# Creates NUM_MIGS managed instance groups and spreads the nodes evenly
# across them, waiting for each MIG to stabilize.
function create-nodes() {
  local template_name="${NODE_INSTANCE_PREFIX}-template"
  # When a dedicated heapster node is requested, it consumes one node slot.
  if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
    local -r nodes="${NUM_NODES}"
  else
    echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
    create-heapster-node
    local -r nodes=$(( NUM_NODES - 1 ))
  fi
  local instances_left=${nodes}
  #TODO: parallelize this loop to speed up the process
  for ((i=1; i<=${NUM_MIGS}; i++)); do
    local group_name="${NODE_INSTANCE_PREFIX}-group-$i"
    if [[ $i == ${NUM_MIGS} ]]; then
      # TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG.
      # We should change it at some point, but note #18545 when changing this.
      group_name="${NODE_INSTANCE_PREFIX}-group"
    fi
    # Spread the remaining number of nodes evenly
    this_mig_size=$((${instances_left} / (${NUM_MIGS}-${i}+1)))
    instances_left=$((instances_left-${this_mig_size}))
    # "|| true" on both calls: tolerate pre-existing groups and transient
    # failures so the remaining MIGs still get created.
    gcloud compute instance-groups managed \
      create "${group_name}" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --base-instance-name "${group_name}" \
      --size "${this_mig_size}" \
      --template "$template_name" || true;
    gcloud compute instance-groups managed wait-until-stable \
      "${group_name}" \
      --zone "${ZONE}" \
      --project "${PROJECT}" \
      --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true;
  done
}
# Assumes:
# - NODE_INSTANCE_PREFIX
# - PROJECT
# - NETWORK_PROJECT
# - REGION
# - ZONE
# - HEAPSTER_MACHINE_TYPE
# - NODE_DISK_TYPE
# - NODE_DISK_SIZE
# - NODE_IMAGE_PROJECT
# - NODE_IMAGE
# - NODE_SERVICE_ACCOUNT
# - NODE_TAG
# - NETWORK
# - ENABLE_IP_ALIASES
# - SUBNETWORK
# - IP_ALIAS_SIZE
#
# Creates a single dedicated VM (not part of any MIG) for heapster, using
# the node image/disk settings but the HEAPSTER_MACHINE_TYPE machine type.
function create-heapster-node() {
  local gcloud="gcloud"
  local network=$(make-gcloud-network-argument \
      "${NETWORK_PROJECT}" \
      "${REGION}" \
      "${NETWORK}" \
      "${SUBNETWORK:-}" \
      "" \
      "${ENABLE_IP_ALIASES:-}" \
      "${IP_ALIAS_SIZE:-}")
  # ${network} and $(get-scope-flags) are intentionally unquoted: each
  # expands to multiple gcloud flags.
  ${gcloud} compute instances \
      create "${NODE_INSTANCE_PREFIX}-heapster" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --machine-type="${HEAPSTER_MACHINE_TYPE}" \
      --boot-disk-type "${NODE_DISK_TYPE}" \
      --boot-disk-size "${NODE_DISK_SIZE}" \
      --image-project="${NODE_IMAGE_PROJECT}" \
      --image "${NODE_IMAGE}" \
      --service-account "${NODE_SERVICE_ACCOUNT}" \
      --tags "${NODE_TAG}" \
      ${network} \
      $(get-scope-flags) \
      --metadata-from-file "$(get-node-instance-metadata)"
}
# Assumes:
# - NUM_MIGS
# - NODE_INSTANCE_PREFIX
# - PROJECT
# - ZONE
# - AUTOSCALER_MAX_NODES
# - AUTOSCALER_MIN_NODES
# Exports
# - AUTOSCALER_MIG_CONFIG
#
# Builds the cluster-autoscaler --nodes=<min>:<max>:<mig-url> flags,
# distributing the global min/max across MIGs the same way create-nodes
# distributes instances.
function create-cluster-autoscaler-mig-config() {
  # The global minimum must not be negative.
  # (The old comment here was a copy-paste of the MAX check's comment and
  # did not match the condition below.)
  if [[ ${AUTOSCALER_MIN_NODES} -lt 0 ]]; then
    echo "AUTOSCALER_MIN_NODES must be greater or equal 0"
    exit 2
  fi
  # Each MIG must have at least one node, so the min number of nodes
  # must be greater or equal to the number of migs.
  if [[ ${AUTOSCALER_MAX_NODES} -lt ${NUM_MIGS} ]]; then
    echo "AUTOSCALER_MAX_NODES must be greater or equal ${NUM_MIGS}"
    exit 2
  fi
  # The code assumes that the migs were created with create-nodes
  # function which tries to evenly spread nodes across the migs.
  AUTOSCALER_MIG_CONFIG=""
  local left_min=${AUTOSCALER_MIN_NODES}
  local left_max=${AUTOSCALER_MAX_NODES}
  for ((i=1; i<=${NUM_MIGS}; i++)); do
    local group_name="${NODE_INSTANCE_PREFIX}-group-$i"
    if [[ $i == ${NUM_MIGS} ]]; then
      # TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG.
      # We should change it at some point, but note #18545 when changing this.
      group_name="${NODE_INSTANCE_PREFIX}-group"
    fi
    # Divide the remaining budget evenly among the remaining MIGs.
    this_mig_min=$((${left_min}/(${NUM_MIGS}-${i}+1)))
    this_mig_max=$((${left_max}/(${NUM_MIGS}-${i}+1)))
    left_min=$((left_min-$this_mig_min))
    left_max=$((left_max-$this_mig_max))
    local mig_url="https://www.googleapis.com/compute/v1/projects/${PROJECT}/zones/${ZONE}/instanceGroups/${group_name}"
    AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --nodes=${this_mig_min}:${this_mig_max}:${mig_url}"
  done
  AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --scale-down-enabled=${AUTOSCALER_ENABLE_SCALE_DOWN}"
}
# Assumes:
# - NUM_MIGS
# - NODE_INSTANCE_PREFIX
# - PROJECT
# - ZONE
# - ENABLE_CLUSTER_AUTOSCALER
# - AUTOSCALER_MAX_NODES
# - AUTOSCALER_MIN_NODES
function create-autoscaler-config() {
  # Create autoscaler for nodes configuration if requested
  if [[ "${ENABLE_CLUSTER_AUTOSCALER}" != "true" ]]; then
    return
  fi
  create-cluster-autoscaler-mig-config
  echo "Using autoscaler config: ${AUTOSCALER_MIG_CONFIG} ${AUTOSCALER_EXPANDER_CONFIG}"
}
# Waits until the apiserver answers over HTTPS (bounded by
# KUBE_CLUSTER_INITIALIZATION_TIMEOUT), then writes kubeconfig credentials
# for the new cluster and prints connection info. Exits 2 on timeout.
#
# Vars set/exported: KUBE_CERT, KUBE_KEY, CA_CERT, CONTEXT.
function check-cluster() {
  detect-node-names
  detect-master
  echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization."
  echo
  echo " This will continually check to see if the API for kubernetes is reachable."
  echo " This may time out if there was some uncaught error during start up."
  echo
  # curl in mavericks is borked.
  secure=""
  if which sw_vers >& /dev/null; then
    if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then
      secure="--insecure"
    fi
  fi
  local start_time=$(date +%s)
  local curl_out=$(mktemp)
  # Remove the scratch file on exit even if we time out below.
  kube::util::trap_add "rm -f ${curl_out}" EXIT
  # Poll the apiserver (5s per attempt, 2s between attempts) until it
  # responds or the overall timeout elapses.
  until curl --cacert "${CERT_DIR}/pki/ca.crt" \
    -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
    ${secure} \
    --max-time 5 --fail \
    "https://${KUBE_MASTER_IP}/api/v1/pods?limit=100" > "${curl_out}" 2>&1; do
    local elapsed=$(($(date +%s) - ${start_time}))
    if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then
      echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2
      echo "Last output from querying API server follows:" >&2
      echo "-----------------------------------------------------" >&2
      cat "${curl_out}" >&2
      echo "-----------------------------------------------------" >&2
      exit 2
    fi
    printf "."
    sleep 2
  done
  echo "Kubernetes cluster created."
  export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt"
  export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key"
  export CA_CERT="${CERT_DIR}/pki/ca.crt"
  export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
  (
    # Tight permissions: the kubeconfig contains credentials.
    umask 077
    # Update the user's kubeconfig to include credentials for this apiserver.
    create-kubeconfig
  )
  # ensures KUBECONFIG is set
  get-kubeconfig-basicauth
  echo
  echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
  echo
  echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
  echo
  echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
  echo
}
# Removes master replica from etcd cluster.
#
# Assumed vars:
#   REPLICA_NAME
#   PROJECT
#   EXISTING_MASTER_NAME
#   EXISTING_MASTER_ZONE
#
# $1: etcd client port
# returns the result of ssh command which removes replica
function remove-replica-from-etcd() {
  local -r port="${1}"
  # Nothing to do when no existing master was discovered.
  [[ -n "${EXISTING_MASTER_NAME}" ]] || return
  # On the existing master: list the etcd members, extract this replica's
  # member id with the sed/grep/cut pipeline (splitting the JSON on `{"id`),
  # then DELETE that member. The heavy escaping keeps the inner quotes
  # intact through ssh; edit with care.
  gcloud compute ssh "${EXISTING_MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${EXISTING_MASTER_ZONE}" \
    --command \
    "curl -s localhost:${port}/v2/members/\$(curl -s localhost:${port}/v2/members -XGET | sed 's/{\\\"id/\n/g' | grep ${REPLICA_NAME}\\\" | cut -f 3 -d \\\") -XDELETE -L 2>/dev/null"
  local -r res=$?
  echo "Removing etcd replica, name: ${REPLICA_NAME}, port: ${port}, result: ${res}"
  return "${res}"
}
# Delete a kubernetes cluster. This is called from test-teardown.
#
# Assumed vars:
# MASTER_NAME
# NODE_INSTANCE_PREFIX
# ZONE
# This function tears down cluster resources 10 at a time to avoid issuing too many
# API calls and exceeding API quota. It is important to bring down the instances before bringing
# down the firewall rules and routes.
function kube-down() {
  # Maximum number of instances/routes to delete per gcloud invocation
  # (guards against exceeding API quota).
  local -r batch=200
  detect-project
  detect-node-names # For INSTANCE_GROUPS
  echo "Bringing down cluster"
  set +e # Do not stop on error
  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
    # Get the name of the managed instance group template before we delete the
    # managed instance group. (The name of the managed instance group template may
    # change during a cluster upgrade.)
    local templates=$(get-template "${PROJECT}")
    # Delete the instance groups in parallel (background jobs).
    for group in ${INSTANCE_GROUPS[@]:-}; do
      if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
        gcloud compute instance-groups managed delete \
          --project "${PROJECT}" \
          --quiet \
          --zone "${ZONE}" \
          "${group}" &
      fi
    done
    # Wait for last batch of jobs
    kube::util::wait-for-jobs || {
      echo -e "Failed to delete instance group(s)." >&2
    }
    # Templates can only be deleted after their instance groups are gone.
    for template in ${templates[@]:-}; do
      if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
        gcloud compute instance-templates delete \
          --project "${PROJECT}" \
          --quiet \
          "${template}"
      fi
    done
    # Delete the special heapster node (if it exists).
    if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
      local -r heapster_machine_name="${NODE_INSTANCE_PREFIX}-heapster"
      if gcloud compute instances describe "${heapster_machine_name}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
        # Now we can safely delete the VM.
        gcloud compute instances delete \
          --project "${PROJECT}" \
          --quiet \
          --delete-disks all \
          --zone "${ZONE}" \
          "${heapster_machine_name}"
      fi
    fi
  fi
  # The replica of the master being torn down in this invocation.
  local -r REPLICA_NAME="${KUBE_REPLICA_NAME:-$(get-replica-name)}"
  set-existing-master
  # Un-register the master replica from etcd and events etcd.
  remove-replica-from-etcd 2379
  remove-replica-from-etcd 4002
  # Delete the master replica (if it exists).
  if gcloud compute instances describe "${REPLICA_NAME}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
    # If there is a load balancer in front of apiservers we need to first update its configuration.
    if gcloud compute target-pools describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
      gcloud compute target-pools remove-instances "${MASTER_NAME}" \
        --project "${PROJECT}" \
        --zone "${ZONE}" \
        --instances "${REPLICA_NAME}"
    fi
    # Now we can safely delete the VM.
    gcloud compute instances delete \
      --project "${PROJECT}" \
      --quiet \
      --delete-disks all \
      --zone "${ZONE}" \
      "${REPLICA_NAME}"
  fi
  # Delete the master replica pd (possibly leaked by kube-up if master create failed).
  # TODO(jszczepkowski): remove also possibly leaked replicas' pds
  local -r replica_pd="${REPLICA_NAME:-${MASTER_NAME}}-pd"
  if gcloud compute disks describe "${replica_pd}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
    gcloud compute disks delete \
      --project "${PROJECT}" \
      --quiet \
      --zone "${ZONE}" \
      "${replica_pd}"
  fi
  # Check if there are any remaining master replicas.
  local REMAINING_MASTER_COUNT=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '$(get-replica-name-regexp)'" \
    --format "value(zone)" | wc -l)
  # In the replicated scenario, if there's only a single master left, we should also delete load balancer in front of it.
  if [[ "${REMAINING_MASTER_COUNT}" -eq 1 ]]; then
    if gcloud compute forwarding-rules describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
      detect-master
      local REMAINING_REPLICA_NAME="$(get-all-replica-names)"
      local REMAINING_REPLICA_ZONE=$(gcloud compute instances list "${REMAINING_REPLICA_NAME}" \
        --project "${PROJECT}" --format="value(zone)")
      # Drop the LB, then move the master IP directly onto the last replica.
      gcloud compute forwarding-rules delete \
        --project "${PROJECT}" \
        --region "${REGION}" \
        --quiet \
        "${MASTER_NAME}"
      attach-external-ip "${REMAINING_REPLICA_NAME}" "${REMAINING_REPLICA_ZONE}" "${KUBE_MASTER_IP}"
      gcloud compute target-pools delete \
        --project "${PROJECT}" \
        --region "${REGION}" \
        --quiet \
        "${MASTER_NAME}"
    fi
  fi
  # If there are no more remaining master replicas, we should delete all remaining network resources.
  if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then
    # Delete firewall rule for the master, etcd servers, and nodes.
    delete-firewall-rules "${MASTER_NAME}-https" "${MASTER_NAME}-etcd" "${NODE_TAG}-all"
    # Delete the master's reserved IP
    if gcloud compute addresses describe "${MASTER_NAME}-ip" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
      gcloud compute addresses delete \
        --project "${PROJECT}" \
        --region "${REGION}" \
        --quiet \
        "${MASTER_NAME}-ip"
    fi
  fi
  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
    # Find out what minions are running.
    local -a minions
    minions=( $(gcloud compute instances list \
      --project "${PROJECT}" \
      --filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
      --format='value(name)') )
    # If any minions are running, delete them in batches of ${batch}.
    while (( "${#minions[@]}" > 0 )); do
      echo Deleting nodes "${minions[*]::${batch}}"
      gcloud compute instances delete \
        --project "${PROJECT}" \
        --quiet \
        --delete-disks boot \
        --zone "${ZONE}" \
        "${minions[@]::${batch}}"
      minions=( "${minions[@]:${batch}}" )
    done
  fi
  # If there are no more remaining master replicas: delete routes, pd for influxdb and update kubeconfig
  if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then
    # Delete routes.
    local -a routes
    # Clean up all routes w/ names like "<cluster-name>-<node-GUID>"
    # e.g. "kubernetes-12345678-90ab-cdef-1234-567890abcdef". The name is
    # determined by the node controller on the master.
    # Note that this is currently a noop, as synchronously deleting the node MIG
    # first allows the master to cleanup routes itself.
    local TRUNCATED_PREFIX="${INSTANCE_PREFIX:0:26}"
    routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \
      --filter="name ~ '${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}'" \
      --format='value(name)') )
    while (( "${#routes[@]}" > 0 )); do
      echo Deleting routes "${routes[*]::${batch}}"
      gcloud compute routes delete \
        --project "${NETWORK_PROJECT}" \
        --quiet \
        "${routes[@]::${batch}}"
      routes=( "${routes[@]:${batch}}" )
    done
    # Delete persistent disk for influx-db.
    if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
      gcloud compute disks delete \
        --project "${PROJECT}" \
        --quiet \
        --zone "${ZONE}" \
        "${INSTANCE_PREFIX}"-influxdb-pd
    fi
    # Delete all remaining firewall rules and network.
    delete-firewall-rules \
      "${CLUSTER_NAME}-default-internal-master" \
      "${CLUSTER_NAME}-default-internal-node" \
      "${NETWORK}-default-ssh" \
      "${NETWORK}-default-internal" # Pre-1.5 clusters
    if [[ "${KUBE_DELETE_NETWORK}" == "true" ]]; then
      # Delete all remaining firewall rules in the network.
      delete-all-firewall-rules || true
      delete-subnetworks || true
      delete-network || true # might fail if there are leaked resources that reference the network
    fi
    # If there are no more remaining master replicas, we should update kubeconfig.
    export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
    clear-kubeconfig
  else
    # If some master replicas remain: cluster has been changed, we need to re-validate it.
    echo "... calling validate-cluster" >&2
    # Override errexit
    (validate-cluster) && validate_result="$?" || validate_result="$?"
    # We have two different failure modes from validate cluster:
    # - 1: fatal error - cluster won't be working correctly
    # - 2: weak error - something went wrong, but cluster probably will be working correctly
    # We just print an error message in case 2).
    if [[ "${validate_result}" -eq 1 ]]; then
      exit 1
    elif [[ "${validate_result}" -eq 2 ]]; then
      echo "...ignoring non-fatal errors in validate-cluster" >&2
    fi
  fi
  set -e
}
# Prints name of one of the master replicas in the current zone. It will be either
# just MASTER_NAME or MASTER_NAME with a suffix for a replica (see get-replica-name-regexp).
#
# Assumed vars:
# PROJECT
# ZONE
# MASTER_NAME
#
# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name.
function get-replica-name() {
  # Print the name of one master replica located in the current ZONE
  # (the first match wins).
  local replica
  replica=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \
    --format "value(name)" | head -n1)
  echo ${replica}
}
# Prints comma-separated names of all of the master replicas in all zones.
#
# Assumed vars:
# PROJECT
# MASTER_NAME
#
# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name.
function get-all-replica-names() {
  # Print a comma-separated list of every master replica name, in all zones.
  local all_names
  all_names=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '$(get-replica-name-regexp)'" \
    --format "value(name)" | tr "\n" "," | sed 's/,$//')
  echo ${all_names}
}
# Prints the number of all of the master replicas in all zones.
#
# Assumed vars:
# MASTER_NAME
function get-master-replicas-count() {
  # Print (without a trailing newline) how many master replicas exist
  # across all zones.
  detect-project
  local num_masters
  num_masters=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '$(get-replica-name-regexp)'" \
    --format "value(zone)" | wc -l)
  echo -n "${num_masters}"
}
# Prints regexp for full master machine name. In a cluster with replicated master,
# VM names may either be MASTER_NAME or MASTER_NAME with a suffix for a replica.
function get-replica-name-regexp() {
  # Anchored prefix match: the base master name, optionally followed by a
  # dash and a three-character replica suffix (see set-replica-name).
  printf '%s\n' "^${MASTER_NAME}(-...)?"
}
# Sets REPLICA_NAME to a unique name for a master replica that will match
# expected regexp (see get-replica-name-regexp).
#
# Assumed vars:
# PROJECT
# ZONE
# MASTER_NAME
#
# Sets:
# REPLICA_NAME
function set-replica-name() {
  # Sets REPLICA_NAME to "<MASTER_NAME>-<3-char suffix>" such that the suffix
  # does not collide with any existing master replica instance name.
  local instances=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '$(get-replica-name-regexp)'" \
    --format "value(name)")
  # Keep drawing random 3-char suffixes until one is not a substring of any
  # existing instance name. BUGFIX: the original started from an empty suffix
  # and only looped while `grep ""` matched, so with no existing instances it
  # produced the malformed name "<MASTER_NAME>-" (empty suffix); this version
  # always generates a suffix.
  suffix=""
  while :; do
    suffix="$(date | md5sum | head -c3)"
    echo "${instances}" | grep "${suffix}" &>/dev/null || break
  done
  REPLICA_NAME="${MASTER_NAME}-${suffix}"
}
# Gets the instance template for given NODE_INSTANCE_PREFIX. It echos the template name so that the function
# output can be used.
# Assumed vars:
# NODE_INSTANCE_PREFIX
#
# $1: project
function get-template() {
  # List instance templates in project $1 whose names match this cluster's
  # node-template naming scheme, optionally carrying a version suffix.
  local -r project="${1}"
  gcloud compute instance-templates list \
    --project="${project}" \
    --filter="name ~ '${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?'" \
    --format='value(name)'
}
# Checks whether any resources related to a kubernetes cluster are still present.
#
# Assumed vars:
# MASTER_NAME
# NODE_INSTANCE_PREFIX
# ZONE
# REGION
# Vars set:
# KUBE_RESOURCE_FOUND
function check-resources() {
  # Checks whether any resources from a kubernetes cluster still exist:
  # managed instance groups, node template, master VM/disk/IP, node VMs,
  # firewall rules, and routes.
  #
  # Returns 1 and sets KUBE_RESOURCE_FOUND to a human-readable description of
  # the first resource found; returns 0 with KUBE_RESOURCE_FOUND empty when
  # nothing is left.
  detect-project
  detect-node-names
  echo "Looking for already existing resources"
  KUBE_RESOURCE_FOUND=""
  if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
    KUBE_RESOURCE_FOUND="Managed instance groups ${INSTANCE_GROUPS[@]}"
    return 1
  fi
  if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Instance template ${NODE_INSTANCE_PREFIX}-template"
    return 1
  fi
  if gcloud compute instances describe --project "${PROJECT}" "${MASTER_NAME}" --zone "${ZONE}" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Kubernetes master ${MASTER_NAME}"
    return 1
  fi
  if gcloud compute disks describe --project "${PROJECT}" "${MASTER_NAME}"-pd --zone "${ZONE}" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Persistent disk ${MASTER_NAME}-pd"
    return 1
  fi
  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
    --format='value(name)') )
  if (( "${#minions[@]}" > 0 )); then
    # Fixed a duplicated word ("matching matching") in the original message.
    KUBE_RESOURCE_FOUND="${#minions[@]} matching ${NODE_INSTANCE_PREFIX}-.+"
    return 1
  fi
  if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${MASTER_NAME}-https" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-https"
    return 1
  fi
  if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-all" &>/dev/null; then
    # The rule checked here is ${NODE_TAG}-all; the original message wrongly
    # reported it as ${MASTER_NAME}-all.
    KUBE_RESOURCE_FOUND="Firewall rules for ${NODE_TAG}-all"
    return 1
  fi
  local -a routes
  routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \
    --filter="name ~ '${INSTANCE_PREFIX}-minion-.{4}'" --format='value(name)') )
  if (( "${#routes[@]}" > 0 )); then
    KUBE_RESOURCE_FOUND="${#routes[@]} routes matching ${INSTANCE_PREFIX}-minion-.{4}"
    return 1
  fi
  if gcloud compute addresses describe --project "${PROJECT}" "${MASTER_NAME}-ip" --region "${REGION}" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Master's reserved IP"
    return 1
  fi
  # No resources found.
  return 0
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e.go
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release() {
  # Make a release for e2e tests; delegates entirely to the release script.
  "${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up.
#
# Assumed vars:
# Variables from config.sh
function test-setup() {
  # Bring up the cluster(s) for e2e tests and open the extra firewall rules
  # (http-alt on 80/8080 and the NodePort range) that the tests rely on.
  # Detect the project into $PROJECT if it isn't set
  detect-project
  if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then
    # Multizone: bring up one zone at a time; after the first zone the
    # remaining kube-up runs attach to the already-created master.
    for KUBE_GCE_ZONE in ${E2E_ZONES}; do
      KUBE_GCE_ZONE="${KUBE_GCE_ZONE}" KUBE_USE_EXISTING_MASTER="${KUBE_USE_EXISTING_MASTER:-}" "${KUBE_ROOT}/cluster/kube-up.sh"
      KUBE_USE_EXISTING_MASTER="true" # For subsequent zones we use the existing master
    done
  else
    "${KUBE_ROOT}/cluster/kube-up.sh"
  fi
  # Open up port 80 & 8080 so common containers on minions can be reached
  # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
  local start=$(date +%s)
  gcloud compute firewall-rules create \
    --project "${NETWORK_PROJECT}" \
    --target-tags "${NODE_TAG}" \
    --allow tcp:80,tcp:8080 \
    --network "${NETWORK}" \
    "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true
  # As there is no simple way to wait longer for this operation we need to manually
  # wait some additional time (20 minutes altogether).
  while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null; do
    if [[ $((start + 1200)) -lt $(date +%s) ]]; then
      echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-http-alt in ${NETWORK_PROJECT}" >&2
      exit 1
    fi
    sleep 5
  done
  # Open up the NodePort range
  # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
  start=$(date +%s)
  gcloud compute firewall-rules create \
    --project "${NETWORK_PROJECT}" \
    --target-tags "${NODE_TAG}" \
    --allow tcp:30000-32767,udp:30000-32767 \
    --network "${NETWORK}" \
    "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true
  # As there is no simple way to wait longer for this operation we need to manually
  # wait some additional time (20 minutes altogether).
  while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null; do
    if [[ $((start + 1200)) -lt $(date +%s) ]]; then
      # BUGFIX: report the project the rule was actually created in
      # (${NETWORK_PROJECT}, matching the first wait loop), not ${PROJECT}.
      echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-nodeports in ${NETWORK_PROJECT}" >&2
      exit 1
    fi
    sleep 5
  done
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown() {
  # Tear down the e2e cluster plus the extra firewall rules (http-alt,
  # nodeports) that test-setup opened.
  detect-project
  echo "Shutting down test cluster in background."
  delete-firewall-rules \
    "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" \
    "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports"
  if ! [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then
    # Single-zone cluster: a single kube-down does it all.
    "${KUBE_ROOT}/cluster/kube-down.sh"
  else
    local zones=( ${E2E_ZONES} )
    # Tear zones down in reverse order; the first zone, which owns the
    # master, goes last with KUBE_USE_EXISTING_MASTER=false.
    for ((zone_num=${#zones[@]}-1; zone_num>0; zone_num--)); do
      KUBE_GCE_ZONE="${zones[zone_num]}" KUBE_USE_EXISTING_MASTER="true" "${KUBE_ROOT}/cluster/kube-down.sh"
    done
    KUBE_GCE_ZONE="${zones[0]}" KUBE_USE_EXISTING_MASTER="false" "${KUBE_ROOT}/cluster/kube-down.sh"
  fi
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node() {
  # SSH to node $1 and run command $2. A handful of probe connections are
  # attempted first, since a freshly created VM may not accept SSH right
  # away; the real command is then run exactly once regardless.
  local -r node="$1"
  local -r cmd="$2"
  local attempt
  for attempt in 1 2 3 4 5; do
    if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
      break
    fi
    sleep 5
  done
  # Then actually try the command.
  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
  # Ensure PROJECT is detected before e2e tests run; no other preparation is
  # needed for the GCE provider.
  detect-project
}
# Delete the image given by $1.
function delete-image() {
  # Remove container image $1 from the registry without prompting.
  local -r image="$1"
  gcloud container images delete --quiet "${image}"
}
|
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
# Private GOPATH used for the build, rooted under the build output directory.
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
# The set of server targets that we are only building for Linux
# If you update this list, please also update build/BUILD.
kube::golang::server_targets() {
  # Echo, space-separated, the Linux-only server build targets.
  local -a targets=(
    cmd/kube-proxy
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/cloud-controller-manager
    cmd/kubelet
    cmd/kubeadm
    cmd/hyperkube
    cmd/kube-scheduler
    vendor/k8s.io/apiextensions-apiserver
    cluster/gce/gci/mounter
  )
  echo "${targets[*]}"
}
IFS=" " read -ra KUBE_SERVER_TARGETS <<< "$(kube::golang::server_targets)"
readonly KUBE_SERVER_TARGETS
# Binary names only: strip the leading path component(s) of each target.
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")
# The set of server targets we build docker images for
kube::golang::server_image_targets() {
  # NOTE: this contains cmd targets for kube::build::get_docker_wrapped_binaries
  # Echo, space-separated, the server targets that get docker images built.
  local -a targets=(
    cmd/cloud-controller-manager
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/kube-scheduler
    cmd/kube-proxy
  )
  echo "${targets[*]}"
}
IFS=" " read -ra KUBE_SERVER_IMAGE_TARGETS <<< "$(kube::golang::server_image_targets)"
readonly KUBE_SERVER_IMAGE_TARGETS
# Binary names only: strip the leading path component(s) of each target.
readonly KUBE_SERVER_IMAGE_BINARIES=("${KUBE_SERVER_IMAGE_TARGETS[@]##*/}")
# The set of server targets that we are only building for Kubernetes nodes
# If you update this list, please also update build/BUILD.
kube::golang::node_targets() {
  # Echo, space-separated, the targets built for Kubernetes nodes.
  local -a targets=(
    cmd/kube-proxy
    cmd/kubeadm
    cmd/kubelet
  )
  echo "${targets[*]}"
}
IFS=" " read -ra KUBE_NODE_TARGETS <<< "$(kube::golang::node_targets)"
readonly KUBE_NODE_TARGETS
readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}")
# Windows variants carry a .exe suffix.
readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}")
# Platform selection: an explicit KUBE_BUILD_PLATFORMS wins; KUBE_FASTBUILD
# restricts everything to amd64; otherwise the full release matrix is used.
if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then
  IFS=" " read -ra KUBE_SERVER_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
  IFS=" " read -ra KUBE_NODE_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
  IFS=" " read -ra KUBE_TEST_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
  IFS=" " read -ra KUBE_CLIENT_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
  readonly KUBE_SERVER_PLATFORMS
  readonly KUBE_NODE_PLATFORMS
  readonly KUBE_TEST_PLATFORMS
  readonly KUBE_CLIENT_PLATFORMS
elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
  readonly KUBE_NODE_PLATFORMS=(linux/amd64)
  # Darwin builders additionally get darwin test/client binaries.
  if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
    readonly KUBE_TEST_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
    readonly KUBE_CLIENT_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
  else
    readonly KUBE_TEST_PLATFORMS=(linux/amd64)
    readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
  fi
else
  # The server platform we are building on.
  readonly KUBE_SERVER_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
  )
  # The node platforms we build for
  readonly KUBE_NODE_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    windows/amd64
  )
  # If we update this we should also update the set of platforms whose standard library is precompiled for in build/build-image/cross/Dockerfile
  readonly KUBE_CLIENT_PLATFORMS=(
    linux/amd64
    linux/386
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    darwin/386
    windows/amd64
    windows/386
  )
  # Which platforms we should compile test targets for. Not all client platforms need these tests
  readonly KUBE_TEST_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    windows/amd64
  )
fi
# The set of client targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
readonly KUBE_CLIENT_TARGETS=(
  cmd/kubectl
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")
# The set of test targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
kube::golang::test_targets() {
  # Echo, space-separated, the test-related targets built for all platforms.
  local -a targets=(
    cmd/gendocs
    cmd/genkubedocs
    cmd/genman
    cmd/genyaml
    cmd/genswaggertypedocs
    cmd/linkcheck
    vendor/github.com/onsi/ginkgo/ginkgo
    test/e2e/e2e.test
  )
  echo "${targets[*]}"
}
IFS=" " read -ra KUBE_TEST_TARGETS <<< "$(kube::golang::test_targets)"
readonly KUBE_TEST_TARGETS
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
# Windows variants carry a .exe suffix.
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# Non-compiled test assets shipped in the test tarball.
# If you update this list, please also update build/BUILD.
readonly KUBE_TEST_PORTABLE=(
  test/e2e/testing-manifests
  test/kubemark
  hack/e2e.go
  hack/e2e-internal
  hack/get-build.sh
  hack/ginkgo-e2e.sh
  hack/lib
)
# Test targets which run on the Kubernetes clusters directly, so we only
# need to target server platforms.
# These binaries will be distributed in the kubernetes-test tarball.
# If you update this list, please also update build/BUILD.
kube::golang::server_test_targets() {
  # Echo, space-separated, test targets that run on cluster machines; the
  # node e2e test binary is included only when building on Linux.
  local -a targets=(
    cmd/kubemark
    vendor/github.com/onsi/ginkgo/ginkgo
  )
  case "${OSTYPE:-}" in
    linux*) targets+=( test/e2e_node/e2e_node.test ) ;;
  esac
  echo "${targets[*]}"
}
IFS=" " read -ra KUBE_TEST_SERVER_TARGETS <<< "$(kube::golang::server_test_targets)"
readonly KUBE_TEST_SERVER_TARGETS
readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}")
readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}")
# Gigabytes necessary for parallel platform builds.
# As of January 2018, RAM usage is exceeding 30G
# Setting to 40 to provide some headroom
readonly KUBE_PARALLEL_BUILD_MEMORY=40
# Union of every target list above (server, client, test, server-test).
readonly KUBE_ALL_TARGETS=(
  "${KUBE_SERVER_TARGETS[@]}"
  "${KUBE_CLIENT_TARGETS[@]}"
  "${KUBE_TEST_TARGETS[@]}"
  "${KUBE_TEST_SERVER_TARGETS[@]}"
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")
# Binaries built with CGO disabled by default
# (see kube::golang::is_statically_linked_library).
readonly KUBE_STATIC_LIBRARIES=(
  cloud-controller-manager
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
  kubeadm
  kubectl
)
# Fully-qualified package names that we want to instrument for coverage information.
readonly KUBE_COVERAGE_INSTRUMENTED_PACKAGES=(
  k8s.io/kubernetes/cmd/kube-apiserver
  k8s.io/kubernetes/cmd/kube-controller-manager
  k8s.io/kubernetes/cmd/kube-scheduler
  k8s.io/kubernetes/cmd/kube-proxy
  k8s.io/kubernetes/cmd/kubelet
)
# KUBE_CGO_OVERRIDES is a space-separated list of binaries which should be built
# with CGO enabled, assuming CGO is supported on the target platform.
# This overrides any entry in KUBE_STATIC_LIBRARIES.
IFS=" " read -ra KUBE_CGO_OVERRIDES <<< "${KUBE_CGO_OVERRIDES:-}"
readonly KUBE_CGO_OVERRIDES
# KUBE_STATIC_OVERRIDES is a space-separated list of binaries which should be
# built with CGO disabled. This is in addition to the list in
# KUBE_STATIC_LIBRARIES.
IFS=" " read -ra KUBE_STATIC_OVERRIDES <<< "${KUBE_STATIC_OVERRIDES:-}"
readonly KUBE_STATIC_OVERRIDES
kube::golang::is_statically_linked_library() {
  # Return 0 when target $1 should be built statically (CGO disabled),
  # 1 when it should be built dynamically (CGO enabled).
  local e
  # Explicitly enable cgo when building kubectl for darwin from darwin.
  [[ "$(go env GOHOSTOS)" == "darwin" && "$(go env GOOS)" == "darwin" &&
    "$1" == *"/kubectl" ]] && return 1
  # CGO overrides win over the static list: a match forces dynamic linking.
  if [[ -n "${KUBE_CGO_OVERRIDES:+x}" ]]; then
    for e in "${KUBE_CGO_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 1; done;
  fi
  # Default static list, then additional static overrides.
  for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  if [[ -n "${KUBE_STATIC_OVERRIDES:+x}" ]]; then
    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  fi
  return 1;
}
# kube::golang::binaries_from_targets takes a list of build targets and returns
# the full go package paths to be built
kube::golang::binaries_from_targets() {
  # Map each build target to a fully-qualified Go package path, prefixing
  # ${KUBE_GO_PACKAGE} unless the target already starts with a domain name.
  local target
  for target in "$@"; do
    if [[ "${target}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
      # Already fully qualified (starts with something like "k8s.io/").
      echo "${target}"
    else
      echo "${KUBE_GO_PACKAGE}/${target}"
    fi
  done
}
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
kube::golang::host_platform() {
  # Print "<GOHOSTOS>/<GOHOSTARCH>" as reported by the go tool.
  printf '%s/%s\n' "$(go env GOHOSTOS)" "$(go env GOHOSTARCH)"
}
# Takes the platform name ($1) and sets the appropriate golang env variables
# for that platform.
kube::golang::set_platform_envs() {
  # Export GOOS/GOARCH (plus cross-compilation CC/CGO_ENABLED where needed)
  # for the target platform "$1" given as "os/arch".
  [[ -n ${1-} ]] || {
    kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
  }
  # BUGFIX: derive the platform from the validated argument. The original
  # checked $1 but then read a "platform" variable from the caller's scope.
  local -r platform="${1}"
  export GOOS=${platform%/*}
  export GOARCH=${platform##*/}
  # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
  if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
    # Dynamic CGO linking for other server architectures than linux/amd64 goes here
    # If you want to include support for more server platforms than these, add arch-specific gcc names here
    case "${platform}" in
      "linux/arm")
        export CGO_ENABLED=1
        export CC=arm-linux-gnueabihf-gcc
        ;;
      "linux/arm64")
        export CGO_ENABLED=1
        export CC=aarch64-linux-gnu-gcc
        ;;
      "linux/ppc64le")
        export CGO_ENABLED=1
        export CC=powerpc64le-linux-gnu-gcc
        ;;
      "linux/s390x")
        export CGO_ENABLED=1
        export CC=s390x-linux-gnu-gcc
        ;;
    esac
  fi
}
kube::golang::unset_platform_envs() {
  # Clear every env var that set_platform_envs may have exported.
  unset GOOS GOARCH GOROOT CGO_ENABLED CC
}
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
  # Create the GOPATH tree under ${KUBE_GOPATH} and symlink the Kubernetes
  # package directory back to the real source tree (${KUBE_ROOT}).
  local go_pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
  local go_pkg_basedir=$(dirname "${go_pkg_dir}")
  mkdir -p "${go_pkg_basedir}"
  # TODO: This symlink should be relative.
  # BUGFIX: quote the readlink argument so paths with spaces don't word-split.
  if [[ ! -e "${go_pkg_dir}" || "$(readlink "${go_pkg_dir}")" != "${KUBE_ROOT}" ]]; then
    ln -snf "${KUBE_ROOT}" "${go_pkg_dir}"
  fi
  cat >"${KUBE_GOPATH}/BUILD" <<EOF
# This dummy BUILD file prevents Bazel from trying to descend through the
# infinite loop created by the symlink at
# ${go_pkg_dir}
EOF
}
# Ensure the go tool exists and is a viable version.
kube::golang::verify_go_version() {
  # Return 2 with a usage message unless a `go` binary exists on PATH and
  # reports at least the minimum supported version.
  if [[ -z "$(which go)" ]]; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi
  # go_version[2] holds the "goX.Y.Z" token of `go version` output.
  local go_version
  IFS=" " read -ra go_version <<< "$(go version)"
  local minimum_go_version
  minimum_go_version=go1.10.2
  # Version check: numerically sort the minimum and detected versions; if the
  # minimum no longer sorts first, the detected version is too old. Builds
  # reporting "devel" are accepted unconditionally.
  if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires ${minimum_go_version} or greater.
Please install ${minimum_go_version} or later.
EOF
    return 2
  fi
}
# kube::golang::setup_env will check that the `go` commands is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
# KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
# env-var GOPATH points to our local output dir
# env-var GOBIN is unset (we want binaries in a predictable place)
# env-var GO15VENDOREXPERIMENT=1
# current directory is within GOPATH
kube::golang::setup_env() {
  # Verify the toolchain, build the fake GOPATH tree, export GOPATH/GOCACHE/
  # PATH/GOROOT, and cd into the tree so subsequent go commands run in-GOPATH.
  kube::golang::verify_go_version
  kube::golang::create_gopath_tree
  export GOPATH="${KUBE_GOPATH}"
  export GOCACHE="${KUBE_GOPATH}/cache"
  # Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
  fi
  # Make sure our own Go binaries are in PATH.
  export PATH="${KUBE_GOPATH}/bin:${PATH}"
  # Change directories so that we are within the GOPATH. Some tools get really
  # upset if this is not true. We use a whole fake GOPATH here to collect the
  # resultant binaries. Go will not let us use GOBIN with `go install` and
  # cross-compiling, and `go install -o <file>` only works for a single pkg.
  local subdir
  subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
  cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"
  # Set GOROOT so binaries that parse code can work properly.
  export GOROOT=$(go env GOROOT)
  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN
  # This seems to matter to some tools (godep, ginkgo...)
  export GO15VENDOREXPERIMENT=1
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
  # Copy built binaries out of the fake GOPATH's bin/ into
  # ${KUBE_OUTPUT_BINPATH}/<platform>, re-pointing ${THIS_PLATFORM_BIN} at
  # the host platform's output directory.
  local host_platform
  host_platform=$(kube::golang::host_platform)
  V=2 kube::log::status "Placing binaries"
  local platform
  for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
    # The substitution on platform_src below will replace all slashes with
    # underscores. It'll transform darwin/amd64 -> darwin_amd64.
    local platform_src="/${platform//\//_}"
    if [[ "$platform" == "$host_platform" ]]; then
      # Host-platform binaries are installed directly into $GOPATH/bin
      # (no per-platform subdirectory).
      platform_src=""
      rm -f "${THIS_PLATFORM_BIN}"
      ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}"
    fi
    local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}"
    if [[ -d "${full_binpath_src}" ]]; then
      mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
      # rsync -pc: preserve permissions, compare by checksum (skip unchanged).
      find "${full_binpath_src}" -maxdepth 1 -type f -exec \
        rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
    fi
  done
}
# Try and replicate the native binary placement of go install without
# calling go install.
kube::golang::outfile_for_binary() {
  # Print the path where `go install` would place the binary for package $1
  # when targeting platform $2 (host-platform binaries land directly in bin/,
  # cross-compiled ones in an os_arch subdirectory).
  local binary=$1
  local platform=$2
  local output_path="${KUBE_GOPATH}/bin"
  # BUGFIX: the original read host_platform from the caller's scope without
  # declaring it; fall back to computing it when the caller did not set it.
  local host_platform="${host_platform:-$(kube::golang::host_platform)}"
  if [[ "$platform" != "$host_platform" ]]; then
    output_path="${output_path}/${platform//\//_}"
  fi
  local bin=$(basename "${binary}")
  # NOTE(review): relies on GOOS having been exported (set_platform_envs);
  # windows binaries get the .exe suffix.
  if [[ ${GOOS} == "windows" ]]; then
    bin="${bin}.exe"
  fi
  echo "${output_path}/${bin}"
}
# Argument: the name of a Kubernetes package.
# Returns 0 if the binary can be built with coverage, 1 otherwise.
# NB: this ignores whether coverage is globally enabled or not.
kube::golang::is_instrumented_package() {
  # Return 0 iff package $1 is listed in KUBE_COVERAGE_INSTRUMENTED_PACKAGES.
  # BUGFIX: the original used `return $(...)`, which substitutes the helper's
  # (empty) stdout into `return` instead of invoking it directly; call the
  # helper and let its exit status propagate.
  kube::util::array_contains "$1" "${KUBE_COVERAGE_INSTRUMENTED_PACKAGES[@]}"
}
# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler)
# Echos the path to a dummy test used for coverage information.
kube::golang::path_for_coverage_dummy_test() {
  # Echo the path of the generated coverage shim test for package $1.
  local -r package="$1"
  local -r name="$(basename "${package}")"
  echo "${KUBE_GOPATH}/src/${package}/zz_generated_${name}_test.go"
}
# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler).
# Creates a dummy unit test on disk in the source directory for the given package.
# This unit test will invoke the package's standard entry point when run.
kube::golang::create_coverage_dummy_test() {
  # Write a TestMain shim next to package $1's sources so that `go test -c`
  # can produce a coverage-instrumented binary behaving like the real one.
  local package="$1"
  local name="$(basename "${package}")"
  # BUGFIX: quote the redirect target so paths containing spaces or glob
  # characters are handled safely.
  cat <<EOF > "$(kube::golang::path_for_coverage_dummy_test "${package}")"
package main
import (
"testing"
"k8s.io/kubernetes/pkg/util/coverage"
)
func TestMain(m *testing.M) {
// Get coverage running
coverage.InitCoverage("${name}")
// Go!
main()
// Make sure we actually write the profiling information to disk, if we make it here.
// On long-running services, or anything that calls os.Exit(), this is insufficient,
// so we also flush periodically with a default period of five seconds (configurable by
// the KUBE_COVERAGE_FLUSH_INTERVAL environment variable).
coverage.FlushCoverage()
}
EOF
}
# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler).
# Deletes a test generated by kube::golang::create_coverage_dummy_test.
# It is not an error to call this for a nonexistent test.
kube::golang::delete_coverage_dummy_test() {
  # Deletes the test generated by kube::golang::create_coverage_dummy_test
  # for package $1. Not an error if the test does not exist (rm -f).
  local package="$1"
  # Fix: quote the command substitution so a path containing whitespace or
  # glob characters is passed to rm as a single argument.
  rm -f "$(kube::golang::path_for_coverage_dummy_test "${package}")"
}
# Arguments: a list of kubernetes packages to build.
# Expected variables: ${build_args} should be set to an array of Go build arguments.
# In addition, ${package} and ${platform} should have been set earlier, and if
# ${KUBE_BUILD_WITH_COVERAGE} is set, coverage instrumentation will be enabled.
#
# Invokes Go to actually build some packages. If coverage is disabled, simply invokes
# go install. If coverage is enabled, builds covered binaries using go test, temporarily
# producing the required unit test files and then cleaning up after itself.
# Non-covered binaries are then built using go install as usual.
kube::golang::build_some_binaries() {
  # Builds the packages given as arguments ($@).
  # Relies on ${build_args[@]} and ${platform} from the caller's scope
  # (set up by build_binaries_for_platform).
  # With KUBE_BUILD_WITH_COVERAGE set, instrumented packages are compiled as
  # coverage test binaries via `go test -c`; everything else is `go install`ed.
  if [[ -n "${KUBE_BUILD_WITH_COVERAGE:-}" ]]; then
    local -a uncovered=()
    for package in "$@"; do
      if kube::golang::is_instrumented_package "${package}"; then
        V=2 kube::log::info "Building ${package} with coverage..."
        # Generate the dummy TestMain and make sure it is removed again on exit.
        kube::golang::create_coverage_dummy_test "${package}"
        kube::util::trap_add "kube::golang::delete_coverage_dummy_test \"${package}\"" EXIT
        # Build the package's tests (including the dummy one) into a binary at
        # the same location go install would have used.
        go test -c -o "$(kube::golang::outfile_for_binary "${package}" "${platform}")" \
          -covermode count \
          -coverpkg k8s.io/... \
          "${build_args[@]}" \
          -tags coverage \
          "${package}"
      else
        uncovered+=("${package}")
      fi
    done
    if [[ "${#uncovered[@]}" != 0 ]]; then
      V=2 kube::log::info "Building ${uncovered[@]} without coverage..."
      go install "${build_args[@]}" "${uncovered[@]}"
    else
      V=2 kube::log::info "Nothing to build without coverage."
    fi
  else
    V=2 kube::log::info "Coverage is disabled."
    go install "${build_args[@]}" "$@"
  fi
}
kube::golang::build_binaries_for_platform() {
  # Builds all ${binaries[@]} (from the caller's scope) for a single platform.
  # $1: target platform ("os/arch"). Splits the binaries into test binaries,
  # statically linked binaries and regular binaries, then builds each group.
  local platform=$1

  local -a statics=()
  local -a nonstatics=()
  local -a tests=()

  V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-}"

  for binary in "${binaries[@]}"; do
    if [[ "${binary}" =~ ".test"$ ]]; then
      tests+=($binary)
    elif kube::golang::is_statically_linked_library "${binary}"; then
      statics+=($binary)
    else
      nonstatics+=($binary)
    fi
  done

  # build_args is read by build_some_binaries via bash dynamic scoping;
  # it is (re)assigned before each call below.
  local -a build_args
  if [[ "${#statics[@]}" != 0 ]]; then
    build_args=(
      -installsuffix static
      ${goflags:+"${goflags[@]}"}
      -gcflags "${gogcflags:-}"
      -ldflags "${goldflags:-}"
    )
    # CGO is disabled so the result is truly statically linked.
    CGO_ENABLED=0 kube::golang::build_some_binaries "${statics[@]}"
  fi

  if [[ "${#nonstatics[@]}" != 0 ]]; then
    build_args=(
      ${goflags:+"${goflags[@]}"}
      -gcflags "${gogcflags:-}"
      -ldflags "${goldflags:-}"
    )
    kube::golang::build_some_binaries "${nonstatics[@]}"
  fi

  # Test binaries are always built one at a time with `go test -c`.
  for test in "${tests[@]:+${tests[@]}}"; do
    local outfile=$(kube::golang::outfile_for_binary "${test}" "${platform}")
    local testpkg="$(dirname ${test})"
    mkdir -p "$(dirname ${outfile})"
    go test -c \
      ${goflags:+"${goflags[@]}"} \
      -gcflags "${gogcflags:-}" \
      -ldflags "${goldflags:-}" \
      -o "${outfile}" \
      "${testpkg}"
  done
}
# Return approximate physical memory available in gigabytes.
kube::golang::get_physmem() {
  # Prints approximate physical memory available, in integer gigabytes.
  # Tries progressively older/other-platform sources; each `if mem=$(...)`
  # branch triggers only when the grep/sysctl pipeline succeeds.
  local mem

  # Linux kernel version >=3.14, in kb
  if mem=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # Linux, in kb
  if mem=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # OS X, in bytes. Note that get_physmem, as used, should only ever
  # run in a Linux container (because it's only used in the multiple
  # platform case, which is a Dockerized build), but this is provided
  # for completeness.
  if mem=$(sysctl -n hw.memsize 2>/dev/null); then
    echo $(( ${mem} / 1073741824 ))
    return
  fi

  # If we can't infer it, just give up and assume a low memory system
  echo 1
}
# Build binaries targets specified
#
# Input:
# $@ - targets and go flags. If no targets are set then all binaries targets
# are built.
# KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
# then just the host architecture is built.
kube::golang::build_binaries() {
  # Builds the requested targets (or all of KUBE_ALL_TARGETS) for every
  # platform in KUBE_BUILD_PLATFORMS (default: the host platform).
  # Arguments starting with "-" are treated as extra go flags.
  # Builds platforms in parallel when enough memory is available.
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    kube::golang::setup_env
    V=2 kube::log::info "Go version: $(go version)"

    local host_platform
    host_platform=$(kube::golang::host_platform)

    # Use eval to preserve embedded quoted strings.
    local goflags goldflags gogcflags
    eval "goflags=(${GOFLAGS:-})"
    goldflags="${GOLDFLAGS:-} $(kube::version::ldflags)"
    gogcflags="${GOGCFLAGS:-}"

    # Split "$@" into go flags (dash-prefixed) and build targets.
    local -a targets=()
    local arg
    for arg; do
      if [[ "${arg}" == -* ]]; then
        # Assume arguments starting with a dash are flags to pass to go.
        goflags+=("${arg}")
      else
        targets+=("${arg}")
      fi
    done

    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("${KUBE_ALL_TARGETS[@]}")
    fi

    local -a platforms
    IFS=" " read -ra platforms <<< "${KUBE_BUILD_PLATFORMS:-}"
    if [[ ${#platforms[@]} -eq 0 ]]; then
      platforms=("${host_platform}")
    fi

    local binaries
    binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))

    # Parallel builds need roughly KUBE_PARALLEL_BUILD_MEMORY GB per platform;
    # fall back to serial builds on small machines.
    local parallel=false
    if [[ ${#platforms[@]} -gt 1 ]]; then
      local gigs
      gigs=$(kube::golang::get_physmem)

      if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
        kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
        parallel=true
      else
        kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
        parallel=false
      fi
    fi

    if [[ "${parallel}" == "true" ]]; then
      kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}"
      local platform
      # One background subshell per platform; each one's output is buffered
      # to a temp file and replayed after all jobs finish.
      for platform in "${platforms[@]}"; do (
          kube::golang::set_platform_envs "${platform}"
          kube::log::status "${platform}: build started"
          kube::golang::build_binaries_for_platform ${platform}
          kube::log::status "${platform}: build finished"
        ) &> "/tmp//${platform//\//_}.build" &
      done

      local fails=0
      for job in $(jobs -p); do
        wait ${job} || let "fails+=1"
      done

      for platform in "${platforms[@]}"; do
        cat "/tmp//${platform//\//_}.build"
      done

      # exit (not return): we are inside the subshell opened above.
      exit ${fails}
    else
      for platform in "${platforms[@]}"; do
        kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
        (
          kube::golang::set_platform_envs "${platform}"
          kube::golang::build_binaries_for_platform ${platform}
        )
      done
    fi
  )
}
|
# Adjust the eDP1 panel's hardware "Backlight" output property by $1 steps
# (pass a negative value to dim).
# NOTE(review): assumes `xrandr --props` prints "Backlight: <value>" with a
# single space, so `cut -d' ' -f2` extracts the number — verify on the target
# driver, as some servers align property output differently.
xrandr --output eDP1 --set Backlight $(( $(xrandr --props|grep Backlight:|cut -d' ' -f2) + $1 ))
|
<filename>weather-pi/temperature-monitor.py
#!/usr/bin/env python
from envirophat import weather
import requests
import time
from subprocess import PIPE, Popen
def get_cpu_temperature():
    """Return the Raspberry Pi CPU temperature in degrees Celsius.

    Parses the output of ``vcgencmd measure_temp``, which looks like
    ``temp=42.8'C``.

    Returns:
        float: CPU temperature in Celsius.
    """
    # universal_newlines=True makes communicate() return str instead of bytes;
    # the original indexed into bytes with str arguments, which raises
    # TypeError on Python 3 (and works on both 2 and 3 this way).
    process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE,
                    universal_newlines=True)
    output, _error = process.communicate()
    return float(output[output.index('=') + 1:output.rindex("'")])
def adjust_temperature(sensor_temp):
    """Compensate the ambient sensor reading for heat bleeding from the CPU.

    Subtracts an empirically scaled fraction (1/1.3) of the CPU-to-sensor
    temperature delta from the raw reading.
    """
    cpu_temp_c = get_cpu_temperature()
    delta = cpu_temp_c - sensor_temp
    return sensor_temp - delta / 1.3
# Calibrate the pressure sensor against a fixed sea-level pressure (QNH).
weather.altitude(qnh=1020)

# Sample once a minute forever, posting each reading to the collector.
while True:
    temp = adjust_temperature(weather.temperature())
    pressure = weather.pressure(unit='hPa')
    print(temp, pressure)
    # Fix: an unreachable collector used to raise and kill the whole loop;
    # keep sampling through transient network failures instead.
    try:
        req = requests.post('http://192.168.1.23:8888/temperature',
                            json={'temp': temp, 'pressure': pressure})
        print('status:', req.status_code)
    except requests.RequestException as exc:
        print('post failed:', exc)
    time.sleep(60)
|
<filename>src/main/java/com/supanadit/restsuite/panel/rest/request/tab/param/ParamsPanel.java
package com.supanadit.restsuite.panel.rest.request.tab.param;
import com.supanadit.restsuite.component.input.api.InputTextURL;
import com.supanadit.restsuite.component.table.ParamsTable;
import com.supanadit.restsuite.helper.UrlParser;
import io.reactivex.disposables.Disposable;
import net.miginfocom.swing.MigLayout;
import javax.swing.*;
import java.util.concurrent.TimeUnit;
/**
 * Panel that shows the query parameters parsed out of the request URL.
 * Subscribes to the URL input field and, after a 300 ms debounce, re-parses
 * the URL text and refreshes the parameter table.
 */
public class ParamsPanel extends JPanel {
    /**
     * @param inputTextURL URL input field whose text changes drive the table
     */
    public ParamsPanel(InputTextURL inputTextURL) {
        super(new MigLayout());
        ParamsTable paramsTable = new ParamsTable(false, null);
        // NOTE(review): this subscription is never disposed; if ParamsPanel
        // instances are created and discarded repeatedly this leaks the
        // subscription — confirm the panel's lifecycle before relying on it.
        Disposable disposable = inputTextURL.getSubject().throttleWithTimeout(300, TimeUnit.MILLISECONDS).subscribe((s) -> {
            UrlParser urlParser = new UrlParser(s);
            paramsTable.setFromRequestArrayList(urlParser.getQueryParams());
        });
        add(paramsTable, "growx,pushx");
    }
}
|
<gh_stars>1000+
#include <set>
#include <sstream>
#include <iostream>
#include <cassert>
#include "CodeGen.h"
#include "Compile.h"
#include "IR.h"
#include "Type.h"
#include "Parser.h"
using namespace Bish;
// Pretty-prints a Bish IR tree back out as source text, annotating typed
// nodes with their inferred type in "{:type}" form (see strtype()).
// The visited_set prevents shared nodes (functions, blocks, calls, ...)
// from being emitted more than once.
class TypeAnnotator : public IRVisitor {
public:
    TypeAnnotator(std::ostream &os) : stream(os), indent_level(0) {}

    // Emit all functions, then all global assignments.
    void visit(Module *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        for (std::vector<Function *>::const_iterator I = n->functions.begin(),
                 E = n->functions.end(); I != E; ++I) {
            (*I)->accept(this);
        }
        for (std::vector<Assignment *>::const_iterator I = n->global_variables.begin(),
                 E = n->global_variables.end(); I != E; ++I) {
            (*I)->accept(this);
            stream << ";\n";
        }
    }

    void visit(ReturnStatement *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        stream << "return ";
        n->value->accept(this);
    }

    // Braced statement list; statements get ";\n" except for the
    // block-structured if/for statements, which emit their own newlines.
    void visit(Block *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        stream << "{\n";
        indent_level++;
        for (std::vector<IRNode *>::const_iterator I = n->nodes.begin(), E = n->nodes.end();
             I != E; ++I) {
            indent();
            (*I)->accept(this);
            if (!dynamic_cast<IfStatement*>(*I) &&
                !dynamic_cast<ForLoop*>(*I)) stream << ";\n";
        }
        indent_level--;
        indent();
        stream << "}\n\n";
    }

    // Variables are annotated with their inferred type.
    void visit(Variable *n) {
        stream << n->name.str() << strtype(n);
    }

    // Array access "var[offset]{:type}"; plain variables delegate above.
    void visit(Location *n) {
        n->variable->accept(this);
        if (n->offset) {
            stream << "[";
            n->offset->accept(this);
            stream << "]";
            stream << strtype(n);
        }
    }

    void visit(IfStatement *n) {
        stream << "if (";
        n->pblock->condition->accept(this);
        stream << ") ";
        n->pblock->body->accept(this);
        for (std::vector<PredicatedBlock *>::const_iterator I = n->elses.begin(),
                 E = n->elses.end(); I != E; ++I) {
            indent();
            stream << "else if (";
            (*I)->condition->accept(this);
            stream << ") ";
            (*I)->body->accept(this);
        }
        if (n->elseblock) {
            indent();
            stream << "else ";
            n->elseblock->accept(this);
        }
    }

    // "for (v in lower [.. upper]) body" — upper bound is optional.
    void visit(ForLoop *n) {
        stream << "for (";
        n->variable->accept(this);
        stream << " in ";
        n->lower->accept(this);
        if (n->upper) {
            stream << " .. ";
            n->upper->accept(this);
        }
        stream << ") ";
        n->body->accept(this);
    }

    // "def name{:rettype} (args...) body" — the function's own annotation is
    // its inferred return type.
    void visit(Function *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        stream << "def " << n->name.str() << strtype(n) << " (";
        const int nargs = n->args.size();
        int i = 0;
        for (std::vector<Variable *>::const_iterator I = n->args.begin(),
                 E = n->args.end(); I != E; ++I, ++i) {
            (*I)->accept(this);
            if (i < nargs - 1) stream << ", ";
        }
        stream << ") ";
        if (n->body) n->body->accept(this);
    }

    void visit(FunctionCall *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        const int nargs = n->args.size();
        stream << n->function->name.str() << "(";
        for (int i = 0; i < nargs; i++) {
            n->args[i]->accept(this);
            if (i < nargs - 1) stream << ", ";
        }
        stream << ")";
    }

    // "@( ... )" shell escape: interleaved literal text and $variable parts.
    void visit(ExternCall *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        stream << "@(";
        for (InterpolatedString::const_iterator I = n->body->begin(), E = n->body->end();
             I != E; ++I) {
            if ((*I).is_str()) {
                stream << (*I).str();
            } else {
                assert((*I).is_var());
                stream << "$";
                (*I).var()->accept(this);
            }
        }
        stream << ")";
    }

    void visit(IORedirection *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        std::string bash_op;
        switch (n->op) {
        case IORedirection::Pipe:
            bash_op = "|";
            break;
        default:
            assert(false && "Unimplemented redirection.");
        }
        n->a->accept(this);
        stream << " " << bash_op << " ";
        n->b->accept(this);
    }

    // Assignment; multiple values render as an array literal "[a, b, ...]".
    void visit(Assignment *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        bool array_init = n->values.size() > 1;
        n->location->accept(this);
        stream << " = ";
        if (array_init) stream << "[";
        const unsigned sz = n->values.size();
        for (unsigned i = 0; i < sz; i++) {
            n->values[i]->accept(this);
            if (i < sz - 1) stream << ", ";
        }
        if (array_init) stream << "]";
    }

    // NOTE(review): unlike most visits, no visited() guard here — presumably
    // expression nodes are never shared; bash_op stays "" if n->op ever
    // falls outside the enumerated cases.
    void visit(BinOp *n) {
        std::string bash_op;
        switch (n->op) {
        case BinOp::Eq:
            bash_op = "==";
            break;
        case BinOp::NotEq:
            bash_op = "!=";
            break;
        case BinOp::LT:
            bash_op = "<";
            break;
        case BinOp::LTE:
            bash_op = "<=";
            break;
        case BinOp::GT:
            bash_op = ">";
            break;
        case BinOp::GTE:
            bash_op = ">=";
            break;
        case BinOp::And:
            bash_op = "and";
            break;
        case BinOp::Or:
            bash_op = "or";
            break;
        case BinOp::Add:
            bash_op = "+";
            break;
        case BinOp::Sub:
            bash_op = "-";
            break;
        case BinOp::Mul:
            bash_op = "*";
            break;
        case BinOp::Div:
            bash_op = "/";
            break;
        case BinOp::Mod:
            bash_op = "%";
            break;
        }
        n->a->accept(this);
        stream << " " << bash_op << " ";
        n->b->accept(this);
    }

    void visit(UnaryOp *n) {
        if (visited(n)) return;
        visited_set.insert(n);
        switch (n->op) {
        case UnaryOp::Negate:
            stream << "-";
            break;
        case UnaryOp::Not:
            stream << "!";
            break;
        }
        n->a->accept(this);
    }

    // Literal leaves: emitted verbatim, no type annotation.
    void visit(Integer *n) {
        stream << n->value;
    }
    void visit(Fractional *n) {
        stream << n->value;
    }
    void visit(String *n) {
        stream << "\"" << n->value << "\"";
    }
    void visit(Boolean *n) {
        stream << n->value;
    }
private:
    unsigned indent_level;        // current block nesting depth
    std::ostream &stream;         // output sink for the annotated source
    std::set<IRNode *> visited_set;

    bool visited(IRNode *n) { return visited_set.find(n) != visited_set.end(); }

    // Emit four spaces per nesting level.
    void indent() {
        for (unsigned i = 0; i < indent_level; i++) {
            stream << "    ";
        }
    }

    // Format a node's inferred type as the "{:type}" annotation suffix.
    std::string strtype(IRNode *n) {
        return "{:" + n->type().str() + "}";
    }
};
// Entry point: parse the given Bish file, run the normal compile pipeline
// (whose type inference populates node types as a side effect), then dump
// the type-annotated source to stdout.
int main(int argc, char **argv) {
    if (argc < 2) {
        std::cerr << "USAGE: " << argv[0] << " <INPUT>\n";
        std::cerr << " Annotates Bish file <INPUT> with inferred type information.\n";
        return 1;
    }

    std::string path(argv[1]);
    Parser p;
    Module *m = p.parse(path);

    std::stringstream s;
    // Don't actually care about the output, just need the compile
    // pipeline to run.
    CodeGenerators::initialize();
    CodeGenerators::CodeGeneratorConstructor cg_constructor =
        CodeGenerators::get("bash");
    assert(cg_constructor);
    compile(m, cg_constructor(s));

    TypeAnnotator annotate(std::cout);
    m->accept(&annotate);
    return 0;
}
|
package de.wwu.wmss.core;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.Date;
import java.util.Enumeration;
import java.util.List;
import java.util.UUID;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.log4j.Logger;
import de.wwu.wmss.exceptions.InvalidWMSSRequestException;
import de.wwu.wmss.settings.SystemSettings;
/**
 * Parses an HTTP import request for the WMSS score-import endpoint.
 * <p>
 * Uploaded multipart files are written under the configured import directory
 * ({@link SystemSettings#getImportdDirectory()}); the scalar request
 * parameters (format, commitsize, source, url, metadata) are read into
 * fields exposed via getters.
 */
public class WMSSImportRequest {

    private String format = SystemSettings.getDefaultRDFFormat();
    private String commitSize = SystemSettings.getDefaultCommitSize();
    private String source = "";
    private Date startDate = new Date();
    private String hostname = "";
    private String urlDataset = "";
    // NOTE(review): never assigned in this class, so getMetadata() always
    // returns "" — confirm whether this field is still needed.
    private String metadata = "";
    private File scoreFile;
    private File metadataFile;
    private static Logger logger = Logger.getLogger("ImportRequestParser");

    /**
     * Builds an import request from the incoming servlet request.
     *
     * @param httpRequest incoming (possibly multipart) HTTP request
     * @throws InvalidWMSSRequestException if an unsupported RDF format is requested
     */
    public WMSSImportRequest(HttpServletRequest httpRequest) throws InvalidWMSSRequestException {
        File uploadDirectory = new File(SystemSettings.getImportdDirectory());
        if (!uploadDirectory.exists()) {
            uploadDirectory.mkdirs();
        }

        try {
            ServletFileUpload sf = new ServletFileUpload(new DiskFileItemFactory());
            List<FileItem> multifiles = sf.parseRequest(httpRequest);
            for (FileItem item : multifiles) {
                File file = new File(uploadDirectory.getAbsolutePath() + "/" + item.getName());
                logger.debug("Uploaded: " + uploadDirectory.getAbsolutePath() + "/" + item.getName());
                item.write(file);
                if (item.getFieldName().equals("metadata")) {
                    this.metadataFile = file;
                    logger.info("Metadata file:\n\n" + this.getMetadataFile().getAbsolutePath());
                }
                if (item.getFieldName().equals("file")) {
                    this.scoreFile = file;
                    logger.debug("Uploaded: " + uploadDirectory.getAbsolutePath() + "/" + this.scoreFile.getName());
                }
            }
        } catch (FileUploadException e) {
            // Fix: log through the application logger instead of printStackTrace().
            logger.error("Failed to parse multipart upload", e);
        } catch (Exception e) {
            logger.error("Failed to store uploaded file", e);
        }

        Enumeration<String> listParameters = httpRequest.getParameterNames();
        while (listParameters.hasMoreElements()) {
            String parameter = (String) listParameters.nextElement();
            if (parameter.toLowerCase().equals("format")) {
                String requested = httpRequest.getParameter(parameter).toLowerCase();
                switch (requested) {
                    case "n-triples": this.format = "N-Triples"; break;
                    case "json-ld":   this.format = "JSON-LD";   break;
                    case "rdf/xml":   this.format = "RDF/XML";   break;
                    case "turtle":    this.format = "Turtle";    break;
                    case "trig":      this.format = "TriG";      break;
                    case "musicxml":  this.format = "musicxml";  break;
                    default:
                        // Fix: report the offending value; the old message
                        // interpolated this.format, i.e. the default, which
                        // hid what the client actually sent.
                        throw new InvalidWMSSRequestException(
                                ErrorCodes.INVALID_RDFFORMAT_DESCRIPTION + " [" + requested + "]",
                                ErrorCodes.INVALID_RDFFORMAT_CODE,
                                ErrorCodes.INVALID_RDFFORMAT_HINT);
                }
            }
            if (parameter.toLowerCase().equals("commitsize")) {
                this.commitSize = httpRequest.getParameter(parameter);
            }
            if (parameter.toLowerCase().equals("source")) {
                this.source = httpRequest.getParameter(parameter);
            }
            if (parameter.toLowerCase().equals("url")) {
                this.urlDataset = httpRequest.getParameter(parameter);
            }
            if (parameter.toLowerCase().equals("metadata")) {
                // Inline metadata: persist the parameter value to a fresh file.
                File inlineMetadata = new File(uploadDirectory.getAbsolutePath() + "/" + UUID.randomUUID());
                try (PrintStream out = new PrintStream(new FileOutputStream(inlineMetadata.getAbsolutePath()))) {
                    out.print(httpRequest.getParameter(parameter));
                } catch (FileNotFoundException e) {
                    logger.error("Failed to write inline metadata file", e);
                }
                this.metadataFile = inlineMetadata;
            }
        }
        // Fix: hoisted out of the loop — it was re-assigned once per
        // parameter and never set at all for parameterless requests.
        this.hostname = httpRequest.getServerName();
    }

    /** @return the RDF serialization format to import (default from settings). */
    public String getFormat() {
        return format;
    }

    /** @return the commit batch size (default from settings). */
    public String getCommitSize() {
        return commitSize;
    }

    /** @return the value of the "source" request parameter, or "". */
    public String getSource() {
        return source;
    }

    /** @return the instant this request object was created. */
    public Date getStartDate() {
        return startDate;
    }

    /** @return the server name the request was addressed to. */
    public String getHostname() {
        return hostname;
    }

    /** @return the value of the "url" request parameter, or "". */
    public String getUrlDataset() {
        return urlDataset;
    }

    public void setFormat(String format) {
        this.format = format;
    }

    /** @return always "" — see the field-level review note. */
    public String getMetadata() {
        return metadata;
    }

    /** @return the uploaded score file, or null if none was sent. */
    public File getScoreFile() {
        return scoreFile;
    }

    /** @return the uploaded or inline-written metadata file, or null. */
    public File getMetadataFile() {
        return metadataFile;
    }
}
|
# Column schema for product records as (field name, type tag) pairs.
# NOTE(review): the type tags ('str'/'int') appear to be consumed by code
# outside this file — confirm the expected tag vocabulary before extending.
product_data = [
    ('code', 'str'),
    ('name', 'str'),
    ('quantity', 'int')
]
#!/bin/bash
# Set the BUILD_VERSION variable to the current date in the format YYYYMMDD
BUILD_VERSION=$(date +"%Y%m%d")
# Run the Docker container with the specified configurations:
# detached, published on port 8000, always restarted, linked to the
# mongo and redis containers, with the Redis password passed via env.
# NOTE(review): <PASSWORD> and <image_name> are placeholders that must be
# substituted before this script is runnable; BUILD_VERSION is set above
# but never used below — confirm whether the image tag should include it.
docker run -d \
-p 8000:8000 \
--name management \
--restart=always \
--link mongo:mongo \
--link redis:redis \
-e REDIS_PASSWORD=<PASSWORD> \
<image_name>
#include "pulseout.h"
#include <stdbool.h>
#include <stdlib.h>
#include <pulse/thread-mainloop.h>
#include <pulse/stream.h>
struct pulseout_state {
    struct sound_state ss;   /* generic interface; first member so casts
                                between sound_state* and pulseout_state* work */
    bool paused;
    bool flush;              /* request PA_SEEK_RELATIVE_ON_READ on next write */
    unsigned srate;          /* output sample rate in Hz */
    sound_callback cbfunc;   /* supplies interleaved stereo S16 frames */
    void *userptr;           /* opaque pointer handed back to cbfunc */
    pa_threaded_mainloop *pa_tm;
    pa_context *pa_c;
    pa_stream *pa_s;
    bool pa_status_changed;  /* set by the context state callback */
};
#include <stdio.h>
/* Generic operation-completion callback: wakes any thread blocked in
 * pa_threaded_mainloop_wait(). The success flag is deliberately ignored. */
static void pulseout_success_cb(pa_stream *s, int success, void *userdata) {
    (void)s;
    (void)success;
    struct pulseout_state *ps = userdata;
    pa_threaded_mainloop_signal(ps->pa_tm, 0);
}
/* Pause or resume playback.
 *
 * Locking protocol (deliberate, if unusual): pausing takes the mainloop
 * lock and does NOT release it; the matching unlock happens either in the
 * later unpause path below or in pulseout_free() ("if (ps->paused)
 * unlock"). Each operation is waited on via pulseout_success_cb.
 * flush only has an effect on unpause: the buffered stream data is
 * dropped before uncorking. */
static void pulseout_pause(struct sound_state *ss, int pause, int flush) {
    struct pulseout_state *ps = (struct pulseout_state *)ss;
    if (ps->paused != !!pause) {
        if (pause) {
            /* Lock is intentionally held until the matching unpause/free. */
            pa_threaded_mainloop_lock(ps->pa_tm);
            pa_operation *op_cork = pa_stream_cork(ps->pa_s, 1, pulseout_success_cb, ps);
            while (pa_operation_get_state(op_cork) == PA_OPERATION_RUNNING)
                pa_threaded_mainloop_wait(ps->pa_tm);
            pa_operation_unref(op_cork);
        }
        ps->paused = pause;
        if (!pause) {
            //if (flush) ps->flush = true;
            if (flush) {
                /* Drop whatever is queued so playback resumes at "now". */
                pa_operation *op_flush = pa_stream_flush(ps->pa_s, pulseout_success_cb, ps);
                if (op_flush) {
                    while (pa_operation_get_state(op_flush) == PA_OPERATION_RUNNING)
                        pa_threaded_mainloop_wait(ps->pa_tm);
                    pa_operation_unref(op_flush);
                } else {
                    //fprintf(stderr, "FLUSH ERR\n");
                }
            }
            /* Uncork, then release the lock taken when we paused. */
            pa_operation *op_cork = pa_stream_cork(ps->pa_s, 0, pulseout_success_cb, ps);
            if (op_cork) {
                while (pa_operation_get_state(op_cork) == PA_OPERATION_RUNNING)
                    pa_threaded_mainloop_wait(ps->pa_tm);
                pa_operation_unref(op_cork);
            } else {
                //fprintf(stderr, "CORK ERR\n");
            }
            pa_threaded_mainloop_unlock(ps->pa_tm);
        }
    }
}
/* Tear down the PulseAudio objects and free the state.
 * Safe to call on a partially initialized state (used as the init error
 * path); each member is checked before being released. */
static void pulseout_free(struct sound_state *ss) {
    struct pulseout_state *ps = (struct pulseout_state *)ss;
    if (ps) {
        if (ps->pa_tm) {
            /* When paused, pulseout_pause/init left the mainloop locked —
             * release it before stopping the loop. */
            if (ps->paused) pa_threaded_mainloop_unlock(ps->pa_tm);
            pa_threaded_mainloop_stop(ps->pa_tm);
            if (ps->pa_s) {
                pa_stream_disconnect(ps->pa_s);
                pa_stream_unref(ps->pa_s);
            }
            if (ps->pa_c) pa_context_unref(ps->pa_c);
            pa_threaded_mainloop_free(ps->pa_tm);
        }
        free(ps);
    }
}
/* Stream write callback: fill the requested number of bytes with frames
 * from the user callback, or with silence while paused.
 * Frames are interleaved stereo S16 (2 channels * sizeof(int16_t)).
 * A pending flush request downgrades the seek mode to
 * PA_SEEK_RELATIVE_ON_READ for this one write, then clears the flag. */
static void pulseout_cb(pa_stream *p, size_t bytes, void *userdata) {
    struct pulseout_state *ps = userdata;
    int16_t *buf;
    /* begin_write may adjust `bytes` to what the server can accept. */
    pa_stream_begin_write(p, (void **)&buf, &bytes);
    size_t nframes = bytes / (2 * sizeof(int16_t));
    if (!ps->paused) {
        ps->cbfunc(ps->userptr, buf, nframes);
    } else {
        for (size_t i = 0; i < nframes; i++) {
            buf[i*2+0] = 0;
            buf[i*2+1] = 0;
        }
    }
    pa_seek_mode_t smode =
        ps->flush ? PA_SEEK_RELATIVE_ON_READ : PA_SEEK_RELATIVE;
    pa_stream_write(p, buf, nframes * 2 * sizeof(int16_t), 0, 0, smode);
    ps->flush = false;
}
/* Context state-change callback: flag the change and wake the thread
 * waiting in pulseout_init's connection loop. */
static void pa_c_cb(pa_context *pa_c, void *userdata) {
    (void)pa_c;
    struct pulseout_state *ps = userdata;
    ps->pa_status_changed = true;
    pa_threaded_mainloop_signal(ps->pa_tm, 0);
}
/* Create a PulseAudio playback backend.
 *
 * clientname: name shown to the PulseAudio server.
 * srate:      output sample rate in Hz (stereo S16 native-endian).
 * cbfunc/userptr: callback that supplies audio frames on demand.
 *
 * Returns the embedded sound_state, or NULL on failure (everything
 * allocated so far is released via pulseout_free).
 *
 * On success the state starts paused and — matching pulseout_pause's
 * protocol — the mainloop lock is still held when this returns; the first
 * unpause releases it. */
struct sound_state *pulseout_init(
        const char *clientname, unsigned srate,
        sound_callback cbfunc, void *userptr) {
    struct pulseout_state *ps = malloc(sizeof(*ps));
    if (!ps) goto err;
    *ps = (struct pulseout_state){
        .ss = {
            .pause = pulseout_pause,
            .free = pulseout_free,
            .apiname = "PulseAudio",
        },
        .cbfunc = cbfunc,
        .userptr = userptr,
        .paused = false,
        .srate = srate,
    };
    ps->pa_tm = pa_threaded_mainloop_new();
    if (!ps->pa_tm) goto err;
    ps->pa_c = pa_context_new(
        pa_threaded_mainloop_get_api(ps->pa_tm), clientname
    );
    if (!ps->pa_c) goto err;
    if (pa_context_connect(ps->pa_c, 0, 0, 0) < 0) goto err;
    pa_context_set_state_callback(ps->pa_c, pa_c_cb, ps);
    if (pa_threaded_mainloop_start(ps->pa_tm) < 0) goto err;
    pa_threaded_mainloop_lock(ps->pa_tm);
    ps->paused = true;
    /* Block until the context either becomes ready or fails; pa_c_cb
     * signals us on every state change (wait() drops the lock while
     * waiting and re-takes it before returning). */
    for (;;) {
        while (!ps->pa_status_changed) {
            pa_threaded_mainloop_wait(ps->pa_tm);
        }
        ps->pa_status_changed = false;
        pa_context_state_t state = pa_context_get_state(ps->pa_c);
        if (state == PA_CONTEXT_CONNECTING ||
            state == PA_CONTEXT_AUTHORIZING ||
            state == PA_CONTEXT_SETTING_NAME) continue;
        else if (state == PA_CONTEXT_READY) break;
        else goto err;
    }
    pa_sample_spec ss = {
        .format = PA_SAMPLE_S16NE,
        .rate = ps->srate,
        .channels = 2,
    };
    ps->pa_s = pa_stream_new(
        ps->pa_c,
        "stereoout", &ss, 0
    );
    if (!ps->pa_s) goto err;
    pa_stream_set_write_callback(ps->pa_s, pulseout_cb, ps);
    /* Target ~1/30 s of buffered audio; -1 leaves the rest server-chosen. */
    pa_buffer_attr battr = {
        .maxlength = -1,
        .tlength = pa_usec_to_bytes(1000*1000/30, &ss),
        .prebuf = -1,
        .minreq = -1,
        .fragsize = -1,
    };
    if (pa_stream_connect_playback(ps->pa_s, 0, &battr, PA_STREAM_ADJUST_LATENCY, 0, 0) < 0) goto err;
    return &ps->ss;
err:
    /* NOTE(review): reached with ps == NULL only when malloc failed, in
     * which case &ps->ss is also NULL (ss is the first member) and
     * pulseout_free handles it — confirm this is the intended contract. */
    pulseout_free(&ps->ss);
    return 0;
}
|
/**
 * Depth (height) of a binary tree: the number of nodes on the longest
 * root-to-leaf path. An empty tree has depth 0.
 */
int maxDepth(TreeNode* root) {
    if (!root) {
        return 0;
    }
    const int deeper = std::max(maxDepth(root->left), maxDepth(root->right));
    return deeper + 1;
}
#!/bin/sh
# T R Y . S H
# BRL-CAD
#
# Copyright (c) 2004-2019 United States Government as represented by
# the U.S. Army Research Laboratory.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###
#
# Script for testing comgeom-g
#
# $Header$
# $1 selects which sample input to convert (1-5).
# With no further arguments, convert into a real database (foo.g);
# any extra arguments are passed through to comgeom-g and the converted
# output is discarded (/dev/null).
CASE=$1

if test x$2 = x
then
	OPT=""
	DB=foo.g
else
	shift
	OPT="$*"
	DB=/dev/null
fi

# Fix: removed the stray `break` statements — `break` is only meaningful
# inside for/while/until loops; `;;` already terminates each case branch.
# $OPT is intentionally unquoted so multiple options word-split.
case $CASE in
1)
	comgeom-g $OPT -v4 m35.cg4 $DB
	;;
2)
	comgeom-g $OPT m35a2.cg5 $DB
	;;
3)
	comgeom-g $OPT apache.cg $DB
	;;
4)
	comgeom-g $OPT avlb.cg5 $DB
	;;
5)
	comgeom-g $OPT -v1 atr.cg1 $DB
	;;
*)
	echo "Try /$1/ unknown"
	exit 1
	;;
esac
# Local Variables:
# mode: sh
# tab-width: 8
# sh-indentation: 4
# sh-basic-offset: 4
# indent-tabs-mode: t
# End:
# ex: shiftwidth=4 tabstop=8
|
<filename>src/components/index.js
// Barrel file: re-export the shared UI components from one entry point so
// consumers can `import { CountDown, ScanBarcode } from '@/components'`.
export { default as CountDown } from './CountDown/index.vue'
export { default as ScanBarcode } from './ScanBarcode/index.vue'
import hashlib
import secrets
class PasswordHasher:
    """Hash passwords with a fresh random salt per call.

    NOTE(review): a single SHA-256 round is fast and therefore weak for
    password storage; a dedicated KDF (bcrypt/scrypt/argon2) would be
    preferable if this guards real credentials.
    """

    def shifr_salt(self, passwd):
        """Return ``(sha256_hex(salt + passwd), salt)``.

        The salt is 16 random bytes rendered as a 32-char hex string and is
        prepended to the password before hashing.
        """
        salt = secrets.token_hex(16)
        digest = hashlib.sha256((salt + passwd).encode()).hexdigest()
        return (digest, salt)
# Termux build recipe for libao (cross-platform audio output library),
# built from the GitHub release tarball with PulseAudio as the backend.
TERMUX_PKG_HOMEPAGE=https://www.xiph.org/ao/
TERMUX_PKG_DESCRIPTION="A cross platform audio library"
TERMUX_PKG_LICENSE="GPL-2.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=1.2.2
TERMUX_PKG_REVISION=4
TERMUX_PKG_SRCURL=https://github.com/xiph/libao/archive/${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_SHA256=df8a6d0e238feeccb26a783e778716fb41a801536fe7b6fce068e313c0e2bf4d
TERMUX_PKG_DEPENDS="pulseaudio"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="--enable-pulse"
# Mark the generated config as a conffile so user edits survive upgrades.
TERMUX_PKG_CONFFILES="etc/libao.conf"

termux_step_pre_configure () {
	# The release tarball ships no configure script; generate it.
	./autogen.sh
}

termux_step_post_make_install () {
	#generate libao config file
	# (quoted heredoc body is written verbatim: pulse driver, 50 ms buffer)
	mkdir -p $TERMUX_PREFIX/etc/
	cat << EOF > $TERMUX_PREFIX/etc/libao.conf
default_driver=pulse
buffer_time=50
quiet
EOF
}
#ifndef _COMMON_H
#define _COMMON_H
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
# ifndef EXTERN_C
# define EXTERN_C extern "C"
# endif
# ifndef EXTERN_C_BEGIN
# define EXTERN_C_BEGIN extern "C" {
# endif
# ifndef EXTERN_C_END
# define EXTERN_C_END }
# endif
#else
# ifndef EXTERN_C
# define EXTERN_C
# endif
# ifndef EXTERN_C_BEGIN
# define EXTERN_C_BEGIN
# endif
# ifndef EXTERN_C_END
# define EXTERN_C_END
# endif
#endif
EXTERN_C_BEGIN
#define LIST_MAX 0xFF
bool SubValueList16(uint16_t *list, uint16_t item, int32_t size);
bool SubValueList32(uint32_t *list, uint32_t item, int32_t size);
bool SubValueList64(uint64_t *list, uint64_t item, int32_t size);
int16_t GetListSize16(uint16_t *list, uint16_t delim);
int32_t GetListSize32(uint32_t *list, uint32_t delim);
int64_t GetListSize64(uint64_t *list, uint64_t delim);
// TODO: GENERIC FUNCTIONS
EXTERN_C_END
#endif |
#!/usr/bin/env bash
# nbdkit
# Copyright (C) 2014-2020 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Red Hat nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# Test that the example in nbdkit-probing(1) man page works. The
# first version of nbdkit to include the version_major/version_minor
# strings was 1.16.5.
# Uses the helper library from the nbdkit test suite (provides `requires`).
source ./functions.sh
set -x
set -e

# Skip the test entirely if `cut` is unavailable.
requires cut --version

# Parse "version_major=N" / "version_minor=N" out of --dump-config,
# exactly as the nbdkit-probing(1) man page example does.
major=$( nbdkit --dump-config | grep ^version_major | cut -d= -f2 )
minor=$( nbdkit --dump-config | grep ^version_minor | cut -d= -f2 )
echo major=$major minor=$minor

# The version_major/version_minor strings first appeared in 1.16.5, so
# anything parsed here must be >= 1.16.
if [ $major -ne 1 ]; then
    echo "$0: version_major parsed by cut != 1"
    exit 1
fi
if [ $minor -lt 16 ]; then
    echo "$0: version_minor parsed by cut < 16"
    exit 1
fi
|
<filename>backend-project/small_eod/events/tests/test_factories.py
from django.test import TestCase
from ...generic.tests.mixins import FactoryTestCaseMixin
from ..factories import EventFactory
from ..models import Event
class EventFactoryTestCase(FactoryTestCaseMixin, TestCase):
    """Smoke-test that EventFactory produces valid Event instances.

    The generic FactoryTestCaseMixin supplies the actual test methods; this
    class only wires up the factory/model pair it should exercise.
    """
    FACTORY = EventFactory  # factory under test
    MODEL = Event  # model the factory must produce
|
package org.smartregister.stock.util;
import java.util.HashMap;
import java.util.Map;
/**
* Manages stock information for a retail application.
*/
/**
 * Tracks per-item stock quantities for a retail application.
 */
public class StockManager {

    private final Map<String, Integer> stock;

    /** Creates a manager with no stock recorded. */
    public StockManager() {
        this.stock = new HashMap<>();
    }

    /**
     * Adds the given quantity of an item to the stock, creating the entry
     * if the item is not yet tracked.
     *
     * @param item the name of the item
     * @param quantity the quantity to add
     */
    public void addStock(String item, int quantity) {
        stock.merge(item, quantity, Integer::sum);
    }

    /**
     * Removes the given quantity of an item if enough units are available;
     * otherwise leaves the stock untouched and reports the shortage.
     *
     * @param item the name of the item
     * @param quantity the quantity to remove
     */
    public void removeStock(String item, int quantity) {
        int available = getStockLevel(item);
        if (available < quantity) {
            // Matches the original behavior: report and keep the level unchanged.
            System.out.println("Insufficient stock for " + item);
        } else {
            stock.put(item, available - quantity);
        }
    }

    /**
     * Returns the recorded stock level for an item.
     *
     * @param item the name of the item
     * @return the current stock level, or 0 for unknown items
     */
    public int getStockLevel(String item) {
        return stock.getOrDefault(item, 0);
    }
}
class SwipeWindow:
    """Manage a FIFO queue of open window records.

    Each record is a dict of the form ``{"id": window_id}``. The
    ``onIdEntered`` and ``onCancelled`` hooks stay as class attributes for
    backward compatibility with code that assigns them on the class.
    """
    onIdEntered = None
    onCancelled = None

    def __init__(self):
        # Bug fix: openWindows used to be a *class* attribute, so every
        # SwipeWindow instance shared one queue. Make it per-instance.
        self.openWindows = []

    def add_window(self, window_id):
        """Append a new window record to the end of the queue."""
        self.openWindows.append({"id": window_id})

    def remove_window(self, window_id):
        """Remove the first queued window with the given id, if any."""
        for window in self.openWindows:
            if window["id"] == window_id:
                self.openWindows.remove(window)
                break

    def next_window_id(self):
        """Return the id of the oldest queued window, or None if empty."""
        if self.openWindows:
            return self.openWindows[0]["id"]
        return None
<reponame>Gesserok/Market
package ua.artcode.market.interfaces;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import ua.artcode.market.controllers.TerminalControllerFactory;
import ua.artcode.market.models.Bill;
import ua.artcode.market.models.BillComparator;
import ua.artcode.market.models.Product;
import ua.artcode.market.models.employee.Employee;
import ua.artcode.market.models.employee.Salesman;
import ua.artcode.market.models.money.Money;
import ua.artcode.market.utils.Generator;
import java.util.List;
import static org.junit.Assert.*;
import static ua.artcode.market.models.BillComparator.other;
public class ITerminalControllerTest {
private ITerminalController terminalController;
@Before
public void setUp() throws Exception {
this.terminalController = TerminalControllerFactory.create();
}
@After
public void tearDown() throws Exception {
this.terminalController = null;
}
@Test
public void createBill() throws Exception {
Employee employee = terminalController.
createSalesman("asd","asd","asd", new Money(123,3));
Bill expected = terminalController.createBill(employee);
assertEquals(0, expected.getProductsMap().size());
assertNotNull(expected);
assertEquals(1, expected.getId());
}
@Test
public void addProduct() throws Throwable {
Product product = Generator.createProduct();
product.setId(1);
Employee employee = terminalController.
createSalesman("asd","asd","asd", new Money(123,3));
terminalController.getIAppDb().saveProduct(product);
Bill bill = terminalController.createBill(employee);
bill = terminalController.addProduct(bill.getId(),
terminalController.getIAppDb().
findProductById(product.getId()));
terminalController.getIAppDb().saveBill(bill);
assertEquals(1, bill.getProductsMap().size());
}
@Test
public void getAllBills() throws Exception {
Employee employee = terminalController.
createSalesman("asd","asd","asd", new Money(123,3));
terminalController.createBill(employee);
terminalController.createBill(employee);
terminalController.createBill(employee);
terminalController.createBill(employee);
terminalController.createBill(employee);
assertEquals(5, terminalController.getBills().size());
}
@Test
public void closeBill() throws Exception {
Employee employee = terminalController.
createSalesman("asd","asd","asd", new Money(123,3));
Bill open = terminalController.createBill(employee);
Bill close = terminalController.closeBill(open.getId());
assertEquals(open, close);
assertNotNull(close.getCloseTime());
}
@Test
public void calculateAmountPriceNeg() throws Exception {
Money amPrice = terminalController.calculateAmountPrice(null);
assertTrue(new Money(0,0).equals(amPrice));
}
@Test
public void calculateAmountPrice() throws Exception {
Employee employee = null;
try {
employee = terminalController.createSalesman("ghjj","jhk","ghk", new Money(45,4));
} catch (Exception e) {
employee = new Salesman("ghjj","jhk","ghk", new Money(45,4));
}
Bill open = terminalController.createBill(employee);
Product product = Generator.createProduct();
open.toString();
open.getOpenTime();
open.getAmountPrice();
open.setAmountPrice(new Money(0,0));
terminalController.getIAppDb().getProducts().put(product, 15);
open = terminalController.addProduct(open.getId(), product);
Money amountPrice = terminalController.calculateAmountPrice(open);
product.getPrice();
product.getId();
product.getName();
assertNotEquals(0.0, amountPrice);
assertNotEquals(0.0, open.getAmountPrice());
}
@Test
public void createSalesman() throws Exception {
Employee salesman = null;
try {
salesman = terminalController.
createSalesman("asdf", "1sdfg", "sdfsas", new Money(0, 0));
System.out.println(salesman);
salesman.getLogin();
salesman.getFullName();
salesman.getPassword();
salesman.toString();
Employee salesman2 = terminalController.
createSalesman("2", "2", "2", new Money(10, 0));
salesman2.setLogin("2");
salesman2.setPassword("2");
salesman2.setFullName("2");
assertFalse(salesman.equals(salesman2));
assertNotEquals(null, salesman);
} catch (Exception e) {
assertNull(salesman);
}
}
// @Test
// public void login() throws Exception {
// Employee salesman = terminalController.createSalesman("1", "1", "1");
// salesman = terminalController.login("1", "1");
// assertNotEquals(null, salesman);
// }
@Test
public void saveAndRemoveBill() throws Exception {
Employee salesman = null;
try {
salesman = terminalController.
createSalesman("1", "1", "1", new Money(0, 0));
} catch (Exception e) {
salesman = new Salesman("1", "1", "1", new Money(0, 0));
}
salesman.getLogin();
Bill expected = terminalController.createBill(salesman);
Bill expectedReturn = terminalController.getIAppDb().saveBill(expected);
Bill acttual = terminalController.getIAppDb().
removeBill(expectedReturn.getId());
terminalController.prinBill(expected);
assertEquals(expectedReturn,acttual);
}
@Test
public void filter() throws Exception {
Product product1 = new Product("asd", new Money(10, 0));
Product product2 = new Product("asd1", new Money(20, 0));
Product product3 = new Product("asd2", new Money(3, 0));
product1.setName("1");
product1.setId(1);
product2.setName("2");
product2.setId(2);
product3.setName("3");
product3.setId(3);
product1.setPrice(new Money(1, 0));
product2.setPrice(new Money(123, 0));
product3.setPrice(new Money(12313, 42));
Employee salesman1 = null;
Employee salesman2 = null;
Employee salesman3 = null;
try {
salesman1 = terminalController.
createSalesman("1123", "123", "1", new Money(0, 0));
salesman2 = terminalController.
createSalesman("2", "12", "1", new Money(0, 0));
salesman3 = terminalController.
createSalesman("2", "13", "1", new Money(0, 0));
} catch (Exception e) {
salesman1 = new Salesman("d","we","re",new Money(1,1));
salesman2 = new Salesman("ad","wse","rse",new Money(1,1));
salesman3 = new Salesman("ad","wsd","res",new Money(1,1));
}
terminalController.getIAppDb().getProducts().put(product1, 15);
terminalController.getIAppDb().getProducts().put(product2, 15);
terminalController.getIAppDb().getProducts().put(product3, 15);
Bill bill1 = terminalController.createBill(salesman1);
terminalController.addProduct(bill1.getId(), product1);
terminalController.addProduct(bill1.getId(), product1);
terminalController.addProduct(bill1.getId(), product1);
Bill bill2 = terminalController.createBill(salesman2);
terminalController.addProduct(bill2.getId(), product1);
terminalController.addProduct(bill2.getId(), product2);
terminalController.addProduct(bill2.getId(), product2);
Bill bill3 = terminalController.createBill(salesman3);
terminalController.addProduct(bill3.getId(), product3);
terminalController.addProduct(bill3.getId(), product2);
terminalController.addProduct(bill3.getId(), product2);
Bill bill4 = terminalController.createBill(salesman3);
terminalController.addProduct(bill4.getId(), product3);
terminalController.addProduct(bill4.getId(), product3);
terminalController.addProduct(bill4.getId(), product1);
bill3.setEmployee(salesman3);
bill2.setEmployee(salesman2);
bill1.setEmployee(salesman1);
bill4.setEmployee(salesman1);
terminalController.closeBill(bill1.getId());
terminalController.closeBill(bill2.getId());
terminalController.closeBill(bill3.getId());
terminalController.closeBill(bill4.getId());
// terminalController.logout((Salesman) salesman1);
terminalController.prinBill(bill1);
terminalController.getIAppDb();
terminalController.getBills();
terminalController.calculateAmountPrice(bill1);
List<Bill> sorted = terminalController.getIAppDb().filter(salesman1,
product1, null, null, BillComparator.
billComparator.thenComparing(other));
terminalController.getIAppDb().removeBill(1);
terminalController.getIAppDb().averageAmountPrice((Salesman) salesman1,null, null);
terminalController.getIAppDb().minAmountPrice((Salesman) salesman1,null, null);
terminalController.getIAppDb().maxAmountPrice((Salesman) salesman1,null, null);
/* LocalDateTime.MIN, LocalDateTime.MAX*/
// assertTrue(sorted.get(0).getAmountPrice() >=
// sorted.get(1).getAmountPrice());
assertTrue(sorted.size() == 2);
}
} |
import useSWR from "swr";
import { ResponsiveContainer, LineChart, Line, CartesianGrid, XAxis, YAxis, Tooltip, Legend } from 'recharts';
import qs from '../../lib/qs';
import type { TLocation } from '../Select/index.d';
import type {
TWeatherProps,
TWeatherElement,
TWeatherElementProps,
TFormattedWeatherElement
} from './index.d';
import styles from './styles.module.css';
const token = process.env.CWBToken;
// Reshape the raw CWB weatherElement array into the three chart-ready
// series (wind speed, 12-hour rain probability, apparent temperature),
// keeping the raw payload alongside them.
function formatWeather(weather: TWeatherElement[]){
  // Look up a forecast element by name, falling back to the first element
  // when the name is absent (preserves the original behaviour).
  const pick = (name: string) =>
    weather.find(el => el.elementName === name) || weather[0];

  const WS = pick('WS').time.map(t => ({
    time: t.dataTime,
    '風速(m/s)': t.elementValue[0].value
  })) as TFormattedWeatherElement[];

  // PoP12h entries carry a startTime instead of a dataTime.
  const PoP12h = pick('PoP12h').time.map(t => ({
    time: t.startTime,
    '降雨機率(%)': t.elementValue[0].value
  })) as TFormattedWeatherElement[];

  const AT = pick('AT').time.map(t => ({
    time: t.dataTime,
    '體感溫度(°C)': t.elementValue[0].value
  })) as TFormattedWeatherElement[];

  return {
    raw: weather,
    WS,
    PoP12h,
    AT
  };
}
// Fetch the forecast for `city` from the CWB open-data API and normalise it
// with formatWeather().
// NOTE(review): the trailing .catch logs and swallows errors, so the promise
// resolves to `undefined` on failure — callers must handle that case.
async function fetchWeather(city: TLocation){
  const url = 'https://opendata.cwb.gov.tw/api/v1/rest/datastore';
  const query = qs({
    Authorization: token,
    locationName: encodeURI(city.label)
  });
  // console.log('fetch url', `${url}/${city.value}?${query}`);
  // city.value is split on "--" and only the part after it is used as the
  // datastore path segment — presumably "<label>--<datastoreId>"; confirm
  // against the Select component that builds these values.
  return fetch(`${url}/${city.value.split('--')[1]}?${query}`)
    .then(res => res.json())
    .then(res => res.records.locations[0].location[0].weatherElement)
    .then(res => formatWeather(res))
    .catch(console.error);
}
// Renders one weather metric as a responsive line chart.
// `weatherEle` is a pre-formatted series from formatWeather() and `value`
// is both the dataKey and the legend label of the plotted line
// (e.g. '風速(m/s)').
function WeatherElement({ weatherEle, value }: TWeatherElementProps){
  return (
    <ResponsiveContainer width="90%" height={300} >
      <LineChart data={weatherEle}>
        <CartesianGrid stroke="#ccc" strokeDasharray="3 3" y={10} />
        <XAxis dataKey="time" />
        <YAxis padding={{ top: 30 }} />
        <Legend />
        <Tooltip />
        <Line type="natural" dataKey={value} stroke="#8884d8" />
      </LineChart>
    </ResponsiveContainer>
  )
}
// Shows the three forecast charts (wind speed, rain probability, apparent
// temperature) for the selected city. Data is fetched via SWR, keyed on the
// city label so switching cities triggers a refetch.
export default function PredictDisplay({ city }: TWeatherProps){
  const { data: weather, error } = useSWR(
    () => `fetch ${city.label}`,
    () => fetchWeather(city)
  );
  // Render states, in order: error, loading, charts.
  // Bug fix: a second `if(error)` check used to sit after the loading
  // return; it was unreachable (the first error return always fired) and
  // has been removed.
  if(error) return <pre>{error}</pre>;
  if(!weather) return <h3>Loading ...</h3>;
  return (
    <>
      <div className={styles.weatherDisplay}>
        <WeatherElement weatherEle={weather.WS} value='風速(m/s)'/>
        <WeatherElement weatherEle={weather.PoP12h} value='降雨機率(%)' />
        <WeatherElement weatherEle={weather.AT} value='體感溫度(°C)' />
      </div>
      {/*
      <pre>{JSON.stringify(weather.raw, null, 2)}</pre>
      */}
    </>
  );
}
|
import xml.etree.ElementTree as ET

# XML document string
xml_data = '''
<languages>
<language>
<name>Python</name>
<popularity>10</popularity>
<age>30</age>
</language>
<language>
<name>Java</name>
<popularity>8</popularity>
<age>25</age>
</language>
<language>
<name>C++</name>
<popularity>7</popularity>
<age>35</age>
</language>
</languages>
'''

# Parse the XML document into an element tree rooted at <languages>.
root = ET.fromstring(xml_data)

# Walk every <language> element, printing its fields and accumulating the
# popularity total, the maximum age and the element count.
popularity_sum = 0
oldest_age = 0
languages_count = 0
for language in root.findall('language'):
    name = language.find('name').text
    popularity = int(language.find('popularity').text)
    age = int(language.find('age').text)
    print(f"Language: {name}, Popularity: {popularity}, Age: {age}")
    popularity_sum += popularity
    oldest_age = max(oldest_age, age)
    languages_count += 1

# Guard against an empty <languages> document — the unguarded division used
# to raise ZeroDivisionError when no <language> elements were present.
average_popularity = popularity_sum / languages_count if languages_count else 0
print(f"Average Popularity: {average_popularity}")
print(f"Oldest Language Age: {oldest_age}")
# Installs the command-not-found hook into the user's shell rc file.
# FLAG stays 1 until a supported shell (bash or zsh) is detected.
FLAG=1
# Require Python 2 for the helper scripts.
# Bug fix: the redirections were ordered "2>&1 > /dev/null", which sends
# stderr to the terminal; "> /dev/null 2>&1" silences both streams.
if ! type python2 > /dev/null 2>&1 ; then
    echo "Python 2 is not installed, exiting.."
    exit 1
fi
# Bash: resolve this script's directory and hook the bash handler into ~/.bashrc.
if [ -n "$BASH_VERSION" ]; then
    SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
    echo "source $SCRIPT_DIR/command_not_found_bash" >> "$HOME/.bashrc"
    echo "Added source to .bashrc"
    FLAG=0
fi
# Zsh: ${(%):-%x} is zsh prompt-expansion syntax yielding the sourced
# script's path; hook the zsh handler into ~/.zshrc.
if [ -n "$ZSH_VERSION" ]; then
    CURRENT_DIR=${(%):-%x}
    SCRIPT_DIR="$( cd "$( dirname "${CURRENT_DIR}" )" && pwd )"
    echo "source $SCRIPT_DIR/command_not_found_zsh" >> "$HOME/.zshrc"
    echo "Added source to .zshrc"
    FLAG=0
fi
# Neither shell was detected, so nothing got installed.
[ $FLAG -ne 0 ] && echo "Could not find either bash nor zsh, exiting.." && exit 1
|
def Fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed (Fibonacci(1) == 0).

    Prints "Incorrect input" and returns None for n < 1.  Iterative:
    O(n) time, O(1) space.  (The original recursion was exponential and
    Fibonacci(0) fell into the recursive branch, crashing on None + None.)
    """
    if n < 1:
        print("Incorrect input")
        return None
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a

# Driver Program
print(Fibonacci(9))
package config
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"text/template"
"github.com/spf13/viper"
)
// ReadConfig read json environment file from directory
//
// The file named `filename` (without extension) is looked up in
// /opt/scripts/config; environment variables override file values via
// viper's AutomaticEnv. The viper instance is returned even when
// ReadInConfig fails, together with that error, so callers can decide
// whether a missing file is fatal.
func ReadConfig(filename string) (*viper.Viper, error) {
	v := viper.New()
	v.SetConfigName(filename)
	v.AddConfigPath("/opt/scripts/config")
	v.AutomaticEnv()
	err := v.ReadInConfig()
	return v, err
}
// WriteJSONToFile write json file
//
// The value i is JSON-encoded and written to path with mode 0644.
// Bug fix: the json.Marshal error used to be discarded ("data, _ :="),
// silently writing empty data for unmarshalable values; it is now returned.
// NOTE(review): log.Fatalf terminates the process, which makes the
// following `return err` unreachable; kept for parity with the other
// writers in this file — consider demoting to log.Printf.
func WriteJSONToFile(path string, i interface{}) error {
	data, err := json.Marshal(i)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(path, data, 0644); err != nil {
		log.Fatalf("Error: %v\n\n", err)
		return err
	}
	return nil
}
// WriteToFile write file
//
// Writes the string s verbatim to path with mode 0644. Write failures are
// logged fatally (process exit) before the error return, mirroring
// WriteJSONToFile.
func WriteToFile(path string, s string) error {
	if err := ioutil.WriteFile(path, []byte(s), 0644); err != nil {
		log.Fatalf("Error: %v\n\n", err)
		return err
	}
	return nil
}
// PrettyJSON print json file in pretty format
//
// Returns i rendered as indented JSON. Marshalling failures abort the
// process via log.Fatalln.
func PrettyJSON(i interface{}) string {
	out, err := json.MarshalIndent(i, "", " ")
	if err != nil {
		log.Fatalln("MarshalIndent:", err)
	}
	return string(out)
}
// ParseTemplate is parse struct variables in different templates for configuration files
//
// Renders templateFileName with data and returns the result; on any error
// the problem is logged and an empty string is returned.
func ParseTemplate(templateFileName string, data interface{}) string {
	t, err := template.ParseFiles(templateFileName)
	if err != nil {
		log.Println(err)
		// Bug fix: t is nil when ParseFiles fails; the old code fell
		// through and panicked on t.Execute. Return early instead.
		return ""
	}
	buf := new(bytes.Buffer)
	if err = t.Execute(buf, data); err != nil {
		log.Println(err)
	}
	return buf.String()
}
|
#!/bin/bash
# Puzzle walker: starting at line 1 of START/text, each line read is a
# comma/space-separated record; one character is appended to the solution
# per step and the walk continues at the referenced file/line until that
# file no longer exists, then the accumulated solution is printed.
#for ff in *.pptx; do mkdir ${ff%%.*}; pushd ${ff%%.*}; unzip -qc "../${ff}" ppt/slides/slide*.xml | grep -oP '(?<=\<a:t\>).*?(?=\</a:t\>)' > text; popd; done
_SOLUTION=
# Print line $2 of file $1/text.
_getNextLine() {
	cat $1/text | head -$2 | tail -1
}
# Consume one record (already word-split by the caller's command
# substitution), append its character, and recurse while the next file
# exists.  NOTE(review): with IFS=', ' the fields are assumed to land at
# word indices 0 (char), 2 (file) and 4 (line) — confirm against the text
# files generated by the commented-out pptx extraction above.
_nextStep() {
	IFS=', '; arrIN=($@); unset IFS;
	local _nextCHAR=${arrIN[0]}
	local _nextFILE=${arrIN[2]}
	local _nextLINE=${arrIN[4]}
	_SOLUTION="${_SOLUTION}${_nextCHAR}"
	if [[ -f "${_nextFILE}" ]]; then
		# Strip the extension to get the directory holding the next text file.
		_nextStep $(_getNextLine ${_nextFILE%%.*} ${_nextLINE})
	fi
}
_nextStep `_getNextLine START 1`
echo ${_SOLUTION}
|
<reponame>smagill/opensphere-desktop
package io.opensphere.controlpanels.layers.layerpopout.model.v1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.Unmarshaller;
import org.junit.Test;
/**
 * Tests the ability to serialize and deserialize a PopoutModel.
 */
public class PopoutModelTest
{
    /**
     * Round-trips a fully populated model through JAXB marshalling and
     * verifies that every field and the key set survive unchanged.
     *
     * @throws JAXBException A jaxb exception.
     */
    @Test
    public void test() throws JAXBException
    {
        // Populate the model with distinct values so field mix-ups are caught.
        PopoutModel expected = new PopoutModel();
        expected.setHeight(1);
        expected.setWidth(2);
        expected.setX(3);
        expected.setY(4);
        expected.setTitle("model1");
        expected.getDataGroupInfoKeys().add("key1");
        expected.getDataGroupInfoKeys().add("key2");
        expected.getDataGroupInfoKeys().add("key3");

        // Marshal to XML ...
        JAXBContext context = JAXBContext.newInstance(PopoutModel.class);
        StringWriter xml = new StringWriter();
        context.createMarshaller().marshal(expected, xml);

        // ... and unmarshal it back.
        Unmarshaller unmarshaller = context.createUnmarshaller();
        PopoutModel actual = (PopoutModel)unmarshaller.unmarshal(new StringReader(xml.getBuffer().toString()));

        assertEquals(expected.getId(), actual.getId());
        assertEquals(expected.getHeight(), actual.getHeight());
        assertEquals(expected.getWidth(), actual.getWidth());
        assertEquals(expected.getX(), actual.getX());
        assertEquals(expected.getY(), actual.getY());
        assertEquals(expected.getTitle(), actual.getTitle());
        assertTrue(actual.getDataGroupInfoKeys().contains("key1"));
        assertTrue(actual.getDataGroupInfoKeys().contains("key2"));
        assertTrue(actual.getDataGroupInfoKeys().contains("key3"));
    }
}
|
// Initialises the "create destination" page: cascading location selects,
// the photo uploader and the validated form submission.
$(document).ready(function()
{
    // Cascading country → state → city selects, backed by select2.
    select2Ubicacion('#pais', '#estado', '#ciudad');
    $('#pais, #estado, #ciudad').select2();
    // Photo picker: jpg/png/jpeg only, max 5000 KB per file, Spanish UI.
    $("#fotos").fileinput({
        // showDelete: true,
        showUpload: false,
        // showCaption: false,
        // showPreview: false,
        allowedFileExtensions: ["jpg", "png", "jpeg"],
        //maxImageWidth: 1250,
        // maxImageHeight: 250,
        // maxFileCount: 20,
        maxFileSize: 5000,
        language: 'es',
    });
    // On submit: attach required-field rules, then persist the form and
    // navigate back to the destination listing.
    $('#btnCrearDestino').on('click', function(e){
        var form = $('#formCrearDestino');
        form.validate ({
            rules:{
                pais :{ required:true },
                estado :{ required:true },
                ciudad :{ required:true },
                nombre :{ required:true },
                direccion :{ required:true }
            }
        });
        if (form.valid()){
            guardar_registro('formCrearDestino', '/destinos/listar', '#btnCrearDestino');
        }
    });
});
|
<reponame>djfkahn/MemberHubDirectoryTools
import unittest
from unittest.mock import patch
import os
import family
import hub_map_tools
import roster
import person
# Shared fixtures for every test class below: the hub map is loaded once
# from ./family_tests/hub_map.csv, and a single Roster is constructed.
# roster.Roster() reads from stdin, so the 'y' answer is patched in.
data_file_path = os.path.abspath("./family_tests/")
hub_file_name = data_file_path + "/hub_map.csv"
common_hub_map = hub_map_tools.ReadHubMapFromFile(hub_file_name)
with patch('builtins.input', side_effect=['y']):
    common_RosterC = roster.Roster()
class UT_01_AddAdultsFromCombinedField(unittest.TestCase):
    """Tests RosterFamily.AddAdultsFromCombinedField parsing of the
    combined 'adults' roster field ("A and B C" vs "A C")."""

    def _family_with_adults(self, raw_name):
        # Build a RosterFamily and feed the combined-adults field through
        # the method under test, using the shared hub map / roster.
        result = family.RosterFamily(adults_raw_name=raw_name)
        result.AddAdultsFromCombinedField('<NAME>', raw_name, common_hub_map, common_RosterC)
        return result

    def _assert_adult(self, adult, first, last, hubs, relation):
        # One-stop check of the adult attributes these tests verify.
        self.assertEqual(first, adult.first_name)
        self.assertEqual(last, adult.last_name)
        self.assertEqual(hubs, adult.hubs)
        self.assertEqual(relation, adult.family_relation)

    def test_01_two_parents(self):
        result = self._family_with_adults('A and B C')
        self.assertEqual(2, len(result.adults))
        self._assert_adult(result.adults[0], 'A', 'C', ['0000'], 'Adult')
        self._assert_adult(result.adults[1], 'B', 'C', ['0000'], 'Adult2')
        self.assertEqual(0, len(result.children))

    def test_02_one_parent(self):
        result = self._family_with_adults('A C')
        self.assertEqual(1, len(result.adults))
        self._assert_adult(result.adults[0], 'A', 'C', ['1111'], 'Adult')
        self.assertEqual(0, len(result.children))
class UT_02_Roster_AddToFamily(unittest.TestCase):
    """Tests RosterFamily.AddToFamily for various grades, teachers and
    adult-name combinations."""

    def _make_family(self, adults_raw, child_first, grade,
                     teacher_name='<NAME>'):
        # Build a RosterFamily from the raw adults field and add one child
        # (last name 'C') in the given grade with the given teacher.
        result = family.RosterFamily(adults_raw_name=adults_raw)
        result.AddToFamily(child_first=child_first,
                           child_last='C',
                           grade=grade,
                           adult_names=adults_raw,
                           teacher_name=teacher_name,
                           hub_map=common_hub_map,
                           rosterC=common_RosterC)
        return result

    def _assert_person(self, person, first, last, hubs, relation):
        # Shared attribute check for both adults and children.
        self.assertEqual(first, person.first_name)
        self.assertEqual(last, person.last_name)
        self.assertEqual(hubs, person.hubs)
        self.assertEqual(relation, person.family_relation)

    def test_01_two_parents(self):
        result = self._make_family('A and B C', 'D', '0')
        self.assertEqual(2, len(result.adults))
        self._assert_person(result.adults[0], 'A', 'C', ['0000'], 'Adult')
        self._assert_person(result.adults[1], 'B', 'C', ['0000'], 'Adult2')
        self.assertEqual(1, len(result.children))
        self._assert_person(result.children[0], 'D', 'C', ['0000'], 'Child1')

    def test_02_one_parent(self):
        result = self._make_family('A C', 'D', '0')
        self.assertEqual(1, len(result.adults))
        self._assert_person(result.adults[0], 'A', 'C', ['1111'], 'Adult')
        self.assertEqual(1, len(result.children))
        self._assert_person(result.children[0], 'D', 'C', ['1111'], 'Child1')

    def test_03_6th_grader(self):
        result = self._make_family('A C', 'D', '6')
        self.assertEqual(1, len(result.adults))
        self._assert_person(result.adults[0], 'A', 'C', ['6666'], 'Adult')
        self.assertEqual(1, len(result.children))
        self._assert_person(result.children[0], 'D', 'C', ['6666'], 'Child1')

    def test_04_8th_grader(self):
        result = self._make_family('A C', 'D', '8')
        self.assertEqual(1, len(result.adults))
        self._assert_person(result.adults[0], 'A', 'C', ['8888'], 'Adult')
        self.assertEqual(1, len(result.children))
        self._assert_person(result.children[0], 'D', 'C', ['8888'], 'Child1')

    def test_05_9th_grader(self):
        # Grade 9 is outside the supported range: nobody should be added.
        result = self._make_family('A C', 'D', '9')
        self.assertEqual(0, len(result.adults))
        self.assertEqual(0, len(result.children))

    def test_06_Unknown_Teacher(self):
        # An unrecognised teacher name also leaves the family empty.
        result = self._make_family('A C', 'D', '5', teacher_name='Unknown Teacher')
        self.assertEqual(0, len(result.adults))
        self.assertEqual(0, len(result.children))
class UT_03_Directory_AddToFamily(unittest.TestCase):
    """Tests DirectoryFamily.AddToFamily routing of people into the adults
    or children lists based on the family_relation string."""

    def _family_with_person(self, family_relation):
        # Build a DirectoryFamily and add a single person; across these
        # tests only family_relation varies.
        result = family.DirectoryFamily('5678')
        result.AddToFamily(person_id='1234',
                           last_name='C',
                           first_name='A',
                           middle_name='',
                           suffix='',
                           email='email',
                           family_id='5678',
                           family_relation=family_relation,
                           hub_name_list='Kinder (Room 0)'.split(';'),
                           account_created='',
                           account_updated='',
                           hub_map=common_hub_map)
        return result

    def test_01_adult_input(self):
        result = self._family_with_person('Adult')
        self.assertEqual(1, len(result.adults))
        adult = result.adults[0]
        self.assertEqual('A', adult.first_name)
        self.assertEqual('C', adult.last_name)
        self.assertEqual('1234', adult.person_id)
        self.assertEqual('5678', adult.family_id)
        self.assertEqual('Adult', adult.family_relation)
        self.assertEqual('email', adult.email)
        self.assertEqual(['0000'], adult.hubs)
        self.assertEqual(0, len(result.children))

    def test_02_child_input(self):
        result = self._family_with_person('Child')
        self.assertEqual(0, len(result.adults))
        self.assertEqual(1, len(result.children))

    def test_03_adult_lower_input(self):
        # Relation matching accepts lower-case 'adult' as well.
        result = self._family_with_person('adult')
        self.assertEqual(1, len(result.adults))
        self.assertEqual(0, len(result.children))

    def test_04_child_lower_input(self):
        result = self._family_with_person('child')
        self.assertEqual(0, len(result.adults))
        self.assertEqual(1, len(result.children))

    def test_05_other_input(self):
        # Unrecognised relations are dropped entirely.
        result = self._family_with_person('Other')
        self.assertEqual(0, len(result.adults))
        self.assertEqual(0, len(result.children))
class UT_04_IsSameFamily(unittest.TestCase):
    """Tests DirectoryFamily.IsSameFamily comparisons against roster
    families with matching/mismatching adults and children."""

    @staticmethod
    def _add_directory_person(fam, person_id, first, relation):
        # All directory entries in these tests share last name 'C',
        # family id '5678', the same email and the Kinder hub.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    @staticmethod
    def _roster_family(raw_name, child_first, adult_names):
        # All roster children in these tests are kindergarteners (grade 0)
        # with last name 'C' and the same placeholder teacher.
        fam = family.RosterFamily(adults_raw_name=raw_name)
        fam.AddToFamily(child_first=child_first,
                        child_last='C',
                        grade='0',
                        adult_names=adult_names,
                        teacher_name='<NAME>',
                        hub_map=common_hub_map,
                        rosterC=common_RosterC)
        return fam

    def test_01_same_family(self):
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1234', 'D', 'Child')
        that = self._roster_family('A C', 'D', 'A C')
        self.assertTrue(this.IsSameFamily(that))

    def test_02_same_adult_different_child(self):
        # Matching is adult-driven: a different child does not break it.
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1234', 'D', 'Child')
        that = self._roster_family('A C', 'E', '<NAME>')
        self.assertTrue(this.IsSameFamily(that))

    def test_03_directory_orphan(self):
        # A directory family with no adults cannot match.
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1235', 'D', 'Child')
        that = self._roster_family('A C', 'E', 'A C')
        self.assertFalse(this.IsSameFamily(that))

    def test_04_roster_orphan(self):
        # A roster family with a blank adults field cannot match.
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1235', 'D', 'Child')
        that = self._roster_family(' ', 'E', ' ')
        self.assertFalse(this.IsSameFamily(that))

    def test_05_different_adult_same_child(self):
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1235', 'D', 'Child')
        that = self._roster_family('E C', 'D', 'E C')
        self.assertFalse(this.IsSameFamily(that))

    def test_06_more_adults_in_directory(self):
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1236', 'B', 'Adult')
        self._add_directory_person(this, '1235', 'D', 'Child')
        that = self._roster_family('A C', 'D', '<NAME>')
        self.assertTrue(this.IsSameFamily(that))

    def test_07_more_adults_in_roster(self):
        this = family.DirectoryFamily('5678')
        self._add_directory_person(this, '1234', 'A', 'Adult')
        self._add_directory_person(this, '1235', 'D', 'Child')
        that = self._roster_family('A C', 'D', 'A C')
        self.assertTrue(this.IsSameFamily(that))
class UT_05_HasNewChildren(unittest.TestCase):
    """Tests for HasNewChildren: does the roster contain children the
    directory family does not already have?"""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults
        # (last name 'C', family 5678, Kinder hub, empty optional fields).
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def _make_roster(self, child_first, adult_names):
        # Build a RosterFamily (adults raw name 'A C') with a single child.
        roster = family.RosterFamily(adults_raw_name='A C')
        roster.AddToFamily(child_first=child_first,
                           child_last='C',
                           grade='0',
                           adult_names=adult_names,
                           teacher_name='<NAME>',
                           hub_map=common_hub_map,
                           rosterC=common_RosterC)
        return roster

    def test_01_same_family(self):
        # Roster child 'D' already exists in the directory -> no new children.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('D', 'A C')
        self.assertFalse(directory.HasNewChildren(roster))

    def test_02_same_adult_different_child(self):
        # Roster child 'E' is unknown to the directory -> new child.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('E', 'A C')
        self.assertTrue(directory.HasNewChildren(roster))

    def test_03_directory_orphan(self):
        # Directory has only a child (no adults); roster child 'E' is new.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('E', 'A C')
        self.assertTrue(directory.HasNewChildren(roster))

    def test_04_roster_orphan(self):
        # Full directory family; roster contributes an unknown child 'E'.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('E', 'A C')
        self.assertTrue(directory.HasNewChildren(roster))

    def test_05_different_adult_same_child(self):
        # Adult names differ but the child matches -> not a new child.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('D', '<NAME>')
        self.assertFalse(directory.HasNewChildren(roster))

    def test_06_more_adults_in_directory(self):
        # Extra directory adult 'B' does not affect child matching.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1236', 'B', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('D', 'A C')
        self.assertFalse(directory.HasNewChildren(roster))

    def test_07_more_adults_in_roster(self):
        # Same single child on both sides -> no new children.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('D', 'A C')
        self.assertFalse(directory.HasNewChildren(roster))
class UT_06_FormFamilyWithNewChildren(unittest.TestCase):
    """Tests for FormFamilyWithNewChildren: building a family that carries
    the directory adults plus only roster children not already present."""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def _make_roster(self, child_first):
        # Build a RosterFamily ('A C') containing a single child.
        roster = family.RosterFamily(adults_raw_name='A C')
        roster.AddToFamily(child_first=child_first,
                           child_last='C',
                           grade='0',
                           adult_names='A C',
                           teacher_name='<NAME>',
                           hub_map=common_hub_map,
                           rosterC=common_RosterC)
        return roster

    def test_01_family_with_new_child(self):
        # Roster child 'E' is not in the directory: the formed family keeps
        # the directory adults and gains exactly that one child.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('E')
        result = family.Family()
        result.FormFamilyWithNewChildren(directory, roster)
        self.assertEqual(result.adults, directory.adults)
        self.assertEqual(1, len(result.children))
        self.assertEqual('E', result.children[0].first_name)
        self.assertEqual('C', result.children[0].last_name)

    def test_02_family_without_new_child(self):
        # Roster child 'D' already exists: the formed family has no children.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        roster = self._make_roster('D')
        result = family.Family()
        result.FormFamilyWithNewChildren(directory, roster)
        self.assertEqual(result.adults, directory.adults)
        self.assertEqual(0, len(result.children))
class UT_07_CombineWith(unittest.TestCase):
    """Tests for CombineWith: merging a roster family's children into an
    existing directory family in place."""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def _make_roster(self, child_first, adults_raw_name):
        # Build a RosterFamily with a single child; the adults' raw name is
        # parameterized so tests can use a non-matching family.
        roster = family.RosterFamily(adults_raw_name=adults_raw_name)
        roster.AddToFamily(child_first=child_first,
                           child_last='C',
                           grade='0',
                           adult_names=adults_raw_name,
                           teacher_name='<NAME>',
                           hub_map=common_hub_map,
                           rosterC=common_RosterC)
        return roster

    def test_01_add_new_child(self):
        # Matching family with an unseen roster child 'E': it is appended
        # after the existing child 'D'.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        directory.CombineWith(self._make_roster('E', 'A C'))
        self.assertEqual(2, len(directory.children))
        self.assertEqual('D', directory.children[0].first_name)
        self.assertEqual('C', directory.children[0].last_name)
        self.assertEqual('E', directory.children[1].first_name)
        self.assertEqual('C', directory.children[1].last_name)

    def test_02_existing_child(self):
        # Roster child 'D' already present: nothing is added.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        directory.CombineWith(self._make_roster('D', 'A C'))
        self.assertEqual(1, len(directory.children))
        self.assertEqual('D', directory.children[0].first_name)
        self.assertEqual('C', directory.children[0].last_name)

    def test_03_different_family(self):
        # Roster belongs to a different family ('A D'): its child 'E' is
        # NOT merged into this directory family.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'B', 'Child')
        directory.CombineWith(self._make_roster('E', 'A D'))
        self.assertEqual(1, len(directory.children))
        self.assertEqual('B', directory.children[0].first_name)
        self.assertEqual('C', directory.children[0].last_name)
class UT_08_IsChildless(unittest.TestCase):
    """Tests for IsChildless. Adults in the Teachers/Staff/Volunteers hubs
    are not reported as childless even without children."""

    def _add_member(self, fam, person_id, first_name, relation,
                    hubs='Kinder (Room 0)'):
        # Add one person with the shared defaults; the hub list is
        # parameterized because hub membership drives IsChildless here.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list=hubs.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def test_01_parent_and_chlid(self):
        # Adult plus child -> clearly not childless.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        self.assertFalse(directory.IsChildless())

    def test_02_parent_no_chlid(self):
        # Lone adult in a classroom hub -> childless.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self.assertTrue(directory.IsChildless())

    def test_03_teacher(self):
        # Lone adult in the Teachers hub -> not treated as childless.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult', hubs='Teachers')
        self.assertFalse(directory.IsChildless())

    def test_04_Staff(self):
        # Lone adult in the Staff hub -> not treated as childless.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult', hubs='Staff')
        self.assertFalse(directory.IsChildless())

    def test_05_volunteer(self):
        # Lone adult in the Volunteers hub -> not treated as childless.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult', hubs='Volunteers')
        self.assertFalse(directory.IsChildless())
class UT_09_IsOrphan(unittest.TestCase):
    """Tests for IsOrphan: a family with children but no adults."""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def test_01_child_and_parent(self):
        # A child with a parent is not an orphan family.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        self.assertFalse(directory.IsOrphan())

    def test_02_child_no_parent(self):
        # A family consisting solely of a child is an orphan family.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self.assertTrue(directory.IsOrphan())
class UT_10_FindAdultInFamily(unittest.TestCase):
    """Tests for FindAdultInFamily: matching a roster adult against the
    adults recorded in a directory family."""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def _adult_to_find(self, first_name):
        # Build the RosterPerson search key (last name 'C', Adult relation).
        return person.RosterPerson('C', first_name, 'Adult', '<NAME>',
                                   common_hub_map)

    def test_01_one_adult_one_match(self):
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self.assertIsNotNone(directory.FindAdultInFamily(self._adult_to_find('A')))

    def test_02_two_adult_one_match(self):
        # The extra (child) member does not interfere with the adult match.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        self.assertIsNotNone(directory.FindAdultInFamily(self._adult_to_find('A')))

    def test_03_three_adult_one_match(self):
        # Two adults plus a child; only adult 'A' matches the search key.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1236', 'D', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        self.assertIsNotNone(directory.FindAdultInFamily(self._adult_to_find('A')))

    def test_04_two_adult_no_match(self):
        # No adult named 'E' exists in the family.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'D', 'Child')
        self.assertIsNone(directory.FindAdultInFamily(self._adult_to_find('E')))

    def test_05_no_adult(self):
        # Family contains only a child: there is no adult to find, even
        # though the names coincide.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self.assertIsNone(directory.FindAdultInFamily(self._adult_to_find('A')))
class UT_11_FindChildInFamily(unittest.TestCase):
    """Tests for FindChildInFamily: matching a roster child against the
    children recorded in a directory family."""

    def _add_member(self, fam, person_id, first_name, relation):
        # Add one person to a DirectoryFamily using the shared test defaults.
        fam.AddToFamily(person_id=person_id,
                        last_name='C',
                        first_name=first_name,
                        middle_name='',
                        suffix='',
                        email='email',
                        family_id='5678',
                        family_relation=relation,
                        hub_name_list='Kinder (Room 0)'.split(';'),
                        account_created='',
                        account_updated='',
                        hub_map=common_hub_map)

    def _child_to_find(self, first_name):
        # Build the RosterPerson search key (last name 'C', Child relation).
        return person.RosterPerson('C', first_name, 'Child', '<NAME>',
                                   common_hub_map)

    def test_01_one_child_one_match(self):
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self.assertIsNotNone(directory.FindChildInFamily(self._child_to_find('A')))

    def test_02_two_child_one_match(self):
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self._add_member(directory, '1235', 'B', 'Child')
        self.assertIsNotNone(directory.FindChildInFamily(self._child_to_find('A')))

    def test_03_three_child_one_match(self):
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self._add_member(directory, '1235', 'B', 'Child')
        self._add_member(directory, '1236', 'D', 'Child')
        self.assertIsNotNone(directory.FindChildInFamily(self._child_to_find('A')))

    def test_04_three_child_no_match(self):
        # No child named 'E' exists among A, B, D.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Child')
        self._add_member(directory, '1235', 'B', 'Child')
        self._add_member(directory, '1236', 'D', 'Child')
        self.assertIsNone(directory.FindChildInFamily(self._child_to_find('E')))

    def test_05_no_child(self):
        # Family contains only adults: a matching name is still not a child.
        directory = family.DirectoryFamily('5678')
        self._add_member(directory, '1234', 'A', 'Adult')
        self._add_member(directory, '1235', 'B', 'Adult')
        self.assertIsNone(directory.FindChildInFamily(self._child_to_find('A')))
# Run every test class in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
<reponame>multiplex/multiplex.js<gh_stars>10-100
// Rollup-generated UMD bundle of the linq-take QUnit tests.
// NOTE(review): this file looks machine-generated; prefer editing the
// original source module rather than this bundle.
(function (global, factory) {
    // UMD wrapper: CommonJS require, AMD define, or browser global (mx).
    typeof exports === 'object' && typeof module !== 'undefined' ? factory(require('../../multiplex')) :
    typeof define === 'function' && define.amd ? define(['../../multiplex'], factory) :
    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.mx));
}(this, (function (mx) { 'use strict';

    // Interop shim so both ES-module and CommonJS builds of multiplex work.
    function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

    var mx__default = /*#__PURE__*/_interopDefaultLegacy(mx);

    // Shared fixtures: the same five numbers exposed through every
    // multiplex collection type so take() is tested uniformly.
    var array = [1, 2, 3, 4, 5];
    var enumerable = mx__default['default'].range(1, 5);
    var collection = new mx__default['default'].Collection(array);
    var list = new mx__default['default'].List(array);
    var linkedList = new mx__default['default'].LinkedList(array);
    var hashSet = new mx__default['default'].HashSet(array);
    var stack = new mx__default['default'].Stack(array);
    var queue = new mx__default['default'].Queue(array);
    var set = new mx__default['default'].Set(array);
    var map = new mx__default['default'].Map();
    var dictionary = new mx__default['default'].Dictionary();
    var sortedList = new mx__default['default'].SortedList();
    var readOnlyCollection = list.asReadOnly();
    var lookup = new mx__default['default'].Lookup(array, function (t) {
        return t;
    });

    // Key/value containers are populated with identity pairs (n -> n).
    for (var i = 0; i < array.length; i++) {
        map.set(array[i], array[i]);
        dictionary.set(array[i], array[i]);
        sortedList.add(array[i], array[i]);
    }

    // Use the global QUnit in the browser, fall back to the package in Node.
    var qunit = typeof QUnit === 'undefined' ? require('qunitjs') : QUnit;
    var qmodule = qunit.module;
    var qtest = qunit.test;
    qunit.expect;  // bare property read preserved by the bundler (no-op)

    qmodule('linq-take');

    qtest('basic take test', function (assert) {
        assert.equal(mx__default['default'](array).take(2).count(), 2, 'Test take in an array');
        assert.equal(mx__default['default']([]).take(2).count(), 0, 'Test take empty array');
        assert.equal(mx__default['default'](array).take(10).count(), array.length, 'Test take more than array size');
        assert.equal(mx__default['default'](array).take(-10).count(), 0, 'Test take negative number');
        assert.deepEqual(mx__default['default']([1, 2, 3, 4]).take(2).toArray(), [1, 2], 'Test take values on array');
        assert.deepEqual(mx__default['default'].range(1, 4).take(2).toArray(), [1, 2], 'Test take values on array');
    });

    // take(count) must behave the same on every collection wrapper.
    qtest('collections take method tests', function (assert) {
        var count = 2;
        assert.equal(mx__default['default'](enumerable).take(count).count(), count, 'Test take numbers in an enumerable are less than 10');
        assert.equal(collection.take(count).count(), count, 'Test take numbers in a Collection are less than 10');
        assert.equal(list.take(count).count(), count, 'Test take numbers in a List are less than 10');
        assert.equal(readOnlyCollection.take(count).count(), count, 'Test take numbers in a ReadOnlyCollection are less than 10');
        assert.equal(linkedList.take(count).count(), count, 'Test take numbers in a LinkedList are less than 10');
        assert.equal(hashSet.take(count).count(), count, 'Test take numbers in a HashSet are less than 10');
        assert.equal(stack.take(count).count(), count, 'Test take numbers in a Stack are less than 10');
        assert.equal(queue.take(count).count(), count, 'Test take numbers in a Queue are less than 10');
        assert.equal(set.take(count).count(), count, 'Test take numbers in a Set are less than 10');
        assert.equal(map.take(count).count(), count, 'Test take numbers in a Map are less than 10');
        assert.equal(dictionary.take(count).count(), count, 'Test take numbers in a Dictionary are less than 10');
        assert.equal(lookup.take(count).count(), count, 'Test take numbers in a Lookup are less than 10');
        assert.equal(sortedList.take(count).count(), count, 'Test take numbers in a SortedList are less than 10');
    });

    // A non-numeric count must throw.
    qtest('take method validations', function (assert) {
        assert.throws(function () {
            mx__default['default']([1]).take('a');
        }, 'non number count');
    });

})));
|
<gh_stars>1-10
package com.yl.appleweather.activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.graphics.Color;
import android.os.Build;
import android.os.Bundle;
import android.support.v4.widget.SwipeRefreshLayout;
import android.support.v7.app.AppCompatActivity;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;

import com.bumptech.glide.Glide;
import com.google.gson.Gson;
import com.yl.appleweather.R;
import com.yl.appleweather.gson.DailyForecast;
import com.yl.appleweather.gson.HourlyForecast;
import com.yl.appleweather.gson.Weather;
import com.yl.appleweather.presenter.MainPresenter;
import com.yl.appleweather.util.ToastUtil;
import com.yl.appleweather.view.MainView;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
/**
 * Weather screen (MVP view): shows current conditions, hourly and daily
 * forecasts, air quality and detail readouts for one county.
 */
public class MainActivity extends AppCompatActivity implements MainView,
        View.OnClickListener, SwipeRefreshLayout.OnRefreshListener {

    // Pull-to-refresh wrapper around the whole weather screen.
    private SwipeRefreshLayout refreshLayout;
    // Full-screen background picture (filled via Glide in loadBackgroundImg()).
    private ImageView bgImage;
    // Current-conditions header.
    private TextView cityName;
    private TextView weatherText;
    private TextView temperature;
    // Today's summary row (weekday name, max/min temperature).
    private TextView dayName;
    private TextView maxTemp;
    private TextView minTemp;
    // Containers filled dynamically in showWeatherInfo().
    private LinearLayout hourlyForecast;
    private LinearLayout dailyForecast;
    // Air-quality readouts (may show "无" when no AQI data is available).
    private TextView airText, aqiText, pm25Text;
    // Detail readouts: rain, pressure, feels-like, wind, visibility, humidity.
    private TextView rainText, pressText, bodyTempText,
            windText, visibilityText, wetText;
    // Opens the list of counties the user has marked.
    private Button showMarkedCounty;
    private ProgressDialog progressDialog;
    // MVP presenter; all data loading/caching is delegated to it.
    private MainPresenter mPresenter = new MainPresenter(this);
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    // Bind every view from activity_main.xml.
    bgImage = (ImageView) findViewById(R.id.bg_image);
    refreshLayout = (SwipeRefreshLayout) findViewById(R.id.refresh_weather);
    cityName = (TextView) findViewById(R.id.city_name);
    weatherText = (TextView) findViewById(R.id.weather_text);
    temperature = (TextView) findViewById(R.id.temperature);
    dayName = (TextView) findViewById(R.id.day_name);
    maxTemp = (TextView) findViewById(R.id.max_temperature);
    minTemp = (TextView) findViewById(R.id.min_temperature);
    hourlyForecast = (LinearLayout) findViewById(R.id.hourly_forecast_view);
    dailyForecast = (LinearLayout) findViewById(R.id.daily_forecast_view);
    airText = (TextView) findViewById(R.id.air_text);
    aqiText = (TextView) findViewById(R.id.aqi_text);
    pm25Text = (TextView) findViewById(R.id.pm25_text);
    rainText = (TextView) findViewById(R.id.rain_text);
    pressText = (TextView) findViewById(R.id.press_text);
    bodyTempText = (TextView) findViewById(R.id.body_temp_text);
    windText = (TextView) findViewById(R.id.wind_text);
    visibilityText = (TextView) findViewById(R.id.visibility_text);
    wetText = (TextView) findViewById(R.id.wet_text);
    showMarkedCounty = (Button) findViewById(R.id.show_marked_county);
    // Wire interaction callbacks, then run first-time setup.
    refreshLayout.setOnRefreshListener(this);
    showMarkedCounty.setOnClickListener(this);
    init();
}
/**
 * First-time setup: makes the status bar transparent (API 21+), loads the
 * background image, and either shows cached weather (refreshing it in the
 * background) or sends the user to the area picker.
 */
public void init() {
    // Window.setStatusBarColor() exists only from API 21 (LOLLIPOP).
    // The previous check (SDK_INT > 19) also matched API 20, where the
    // call is missing and would crash.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
        View decorView = getWindow().getDecorView();
        decorView.setSystemUiVisibility(View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN |
                View.SYSTEM_UI_FLAG_LAYOUT_STABLE);
        getWindow().setStatusBarColor(Color.TRANSPARENT);
    }
    loadBackgroundImg();
    String weatherJson = mPresenter.getWeatherPrefs();
    if (weatherJson == null) {
        // No cached weather yet: ask the user to pick an area first.
        chooseArea();
    } else {
        // Show cached data immediately, then refresh it in the background.
        showWeatherInfo();
        refreshLayout.setRefreshing(true);
        mPresenter.refreshWeatherInfo(mPresenter.getCurrentWeatherId());
    }
}
/**
 * Receives the county picked in MarkedCountyActivity (requestCode 1) or
 * ChooseAreaActivity (requestCode 2) and loads its weather.
 *
 * Fixes: the original never called super.onActivityResult() (breaking
 * fragment result dispatch in AppCompatActivity) and would NPE if the
 * result Intent or its "weather_id" extra was missing.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    if (resultCode != RESULT_OK || data == null) {
        return;
    }
    String weatherId = data.getStringExtra("weather_id");
    if (weatherId == null) {
        return;
    }
    switch (requestCode) {
        case 1:
            // Marked-county list: only reload when the selection changed.
            if (!weatherId.equals(mPresenter.getCurrentWeatherId())) {
                requestWeather(weatherId);
            }
            break;
        case 2:
            // Area picker: always load the chosen county.
            requestWeather(weatherId);
            break;
        default:
            break;
    }
}
@Override
public void chooseArea() {
    // Launch the area picker; the result comes back through
    // onActivityResult with requestCode 2.
    Intent intent = new Intent(MainActivity.this, ChooseAreaActivity.class);
    startActivityForResult(intent, 2);
}
@Override
public void showCountyList() {
    // Launch the marked-county list; the result comes back through
    // onActivityResult with requestCode 1.
    Intent intent = new Intent(MainActivity.this, MarkedCountyActivity.class);
    startActivityForResult(intent, 1);
}
/**
 * Renders the weather JSON cached by the presenter into the UI: current
 * conditions, hourly forecast strip, air quality, daily forecast list,
 * and the detail readouts.
 *
 * Fix: SimpleDateFormat now takes an explicit Locale instead of silently
 * depending on the platform default (Android lint flags the no-Locale
 * constructor).
 */
@Override
public void showWeatherInfo() {
    String weatherJson = mPresenter.getWeatherPrefs();
    Weather weather = new Gson().fromJson(weatherJson, Weather.class);

    // Current conditions header.
    cityName.setText(weather.basic.cityName);
    weatherText.setText(weather.now.more.info);
    temperature.setText(weather.now.temperature + "℃");
    Date date = new Date(System.currentTimeMillis());
    dayName.setText(new SimpleDateFormat("EEEE", Locale.getDefault()).format(date) + " 今天");
    maxTemp.setText(weather.dailyForecastList.get(0).temperature.max);
    minTemp.setText(weather.dailyForecastList.get(0).temperature.min);

    // 时段预报 — rebuild the hourly strip from scratch.
    hourlyForecast.removeAllViews();
    for (HourlyForecast forecast : weather.hourlyForecastList) {
        View view = LayoutInflater.from(this)
                .inflate(R.layout.item_hourly_forecast, hourlyForecast, false);
        TextView dateText = (TextView) view.findViewById(R.id.time_text);
        TextView infoText = (TextView) view.findViewById(R.id.info_text);
        TextView tempText = (TextView) view.findViewById(R.id.temp_text);
        // Keep only the text after the first space (the time portion).
        String time = forecast.date.substring(forecast.date.indexOf(" ") + 1);
        dateText.setText(time);
        infoText.setText(forecast.more.info);
        tempText.setText(forecast.temperature + "℃");
        hourlyForecast.addView(view);
    }

    // Air-quality data may be absent for some cities.
    if (weather.aqi == null) {
        airText.setText("无");
        aqiText.setText("无");
        pm25Text.setText("无");
    } else {
        airText.setText(weather.aqi.city.airQuality);
        aqiText.setText(weather.aqi.city.aqi);
        pm25Text.setText(weather.aqi.city.pm25);
    }

    // 三天预报 — rebuild the daily forecast list from scratch.
    dailyForecast.removeAllViews();
    for (DailyForecast forecast : weather.dailyForecastList) {
        View view = LayoutInflater.from(this).inflate(R.layout.item_daily_forecast,
                dailyForecast, false);
        TextView dateText = (TextView) view.findViewById(R.id.date_text);
        TextView infoText = (TextView) view.findViewById(R.id.info_text);
        TextView maxText = (TextView) view.findViewById(R.id.max_text);
        TextView minText = (TextView) view.findViewById(R.id.min_text);
        dateText.setText(forecast.date);
        infoText.setText(forecast.more.info);
        maxText.setText(forecast.temperature.max);
        minText.setText(forecast.temperature.min);
        dailyForecast.addView(view);
    }

    // Detail readouts: rain, pressure, feels-like, wind, visibility, humidity.
    rainText.setText(weather.now.pcpn + " mm");
    pressText.setText(weather.now.pres);
    bodyTempText.setText(weather.now.fl + "℃");
    windText.setText(weather.now.wind.dir + ", "
            + weather.now.wind.spd + " km/h");
    visibilityText.setText(weather.now.vis + " km");
    wetText.setText(weather.now.hum + " %");
}
    @Override
    public void refreshWeatherInfo() {
        // Re-fetch weather for the currently displayed weather id.
        mPresenter.refreshWeatherInfo(mPresenter.getCurrentWeatherId());
    }
    @Override
    public void onRefreshFailed() {
        // Stop the pull-to-refresh spinner and tell the user the refresh failed.
        refreshLayout.setRefreshing(false);
        ToastUtil.showToast(this, "刷新失败", 0);
    }
    @Override
    public void onRefreshSucceed() {
        // Just stop the spinner; the refreshed data is rendered elsewhere.
        refreshLayout.setRefreshing(false);
    }
@Override
public void loadBackgroundImg() {
String imageUrl = mPresenter.getImagePrefs();
if (imageUrl == null) {
mPresenter.loadImage();
} else {
Glide.with(this).load(imageUrl).centerCrop().into(bgImage);
}
}
    @Override
    public void requestWeather(String weatherId) {
        // Delegate the fetch to the presenter (MVP contract method).
        mPresenter.getWeatherInfo(weatherId);
    }
    @Override
    public void onFailed() {
        // Generic load-failure toast ("加载失败" = load failed).
        ToastUtil.showToast(this, "加载失败", 0);
    }
@Override
public void showProgressDialog() {
if (progressDialog == null) {
progressDialog = new ProgressDialog(this);
progressDialog.setMessage("正在加载");
progressDialog.setCanceledOnTouchOutside(false);
}
progressDialog.show();
}
    @Override
    public void hideProgressDialog() {
        // Dialog is created lazily, so it may still be null here.
        if (progressDialog != null) {
            progressDialog.dismiss();
        }
    }
@Override
public void onDestroyView() {
if (mPresenter != null) {
mPresenter = null;
}
}
    @Override
    protected void onDestroy() {
        super.onDestroy();
        // Drop the presenter reference alongside Activity teardown.
        onDestroyView();
    }
@Override
public void onClick(View v) {
switch (v.getId()) {
case R.id.show_marked_county:
showCountyList();
break;
default:
break;
}
}
    @Override
    public void onRefresh() {
        // SwipeRefreshLayout callback: trigger a data refresh.
        refreshWeatherInfo();
    }
}
|
// Base API path for the pre-registration ("/prereg") endpoints.
import {apBasePath} from "./_base";
export default apBasePath.appendPathSegment<{}>("/prereg");
<filename>src/main/java/com/google/enterprise/secmgr/common/CookieStoreImpl.java
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.enterprise.secmgr.common;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import org.joda.time.DateTimeUtils;
import java.util.AbstractCollection;
import java.util.Iterator;
import java.util.Map;
import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.NotThreadSafe;
/**
 * An implementation of a mutable cookie store that automatically expires
 * cookies whenever a cookie is added or the store is iterated.
 *
 * <p>Backed by a hash map keyed on {@link GCookie.Key}. Not safe for
 * concurrent use.
 */
@NotThreadSafe
@ParametersAreNonnullByDefault
final class CookieStoreImpl extends AbstractCollection<GCookie>
    implements CookieStore {

  // One stored cookie per key; see add() for the partial/full-key invariant.
  @Nonnull private final Map<GCookie.Key, GCookie> map;

  CookieStoreImpl() {
    map = Maps.newHashMap();
  }

  @Override
  public int size() {
    return map.size();
  }

  // For a given cookie name, a store can contain exactly one partial key with
  // that name, or no partial keys and any number of full keys.
  @Override
  public boolean add(GCookie cookie) {
    GCookie.Key key = cookie.getKey();
    GCookie oldCookie = map.get(key);
    // Adding a cookie identical to the stored one is a no-op.
    if (cookie.equals(oldCookie)) {
      return false;
    }
    // Evict cookies whose keys conflict with the new key, maintaining the
    // invariant documented above.
    Iterables.removeIf(this, matchingKeyPredicate(key));
    if (oldCookie != null) {
      // Replacing a cookie preserves the original creation time.
      map.put(key, GCookie.builder(cookie).setCreationTime(oldCookie.getCreationTime()).build());
    } else {
      map.put(key, cookie);
    }
    return true;
  }

  // Returns a predicate selecting the cookies that must be evicted before a
  // cookie with the given key can be stored.
  private static Predicate<GCookie> matchingKeyPredicate(final GCookie.Key key) {
    return key.isPartial()
        // A partial key conflicts with every cookie sharing its name.
        ? new Predicate<GCookie>() {
          @Override
          public boolean apply(GCookie cookie) {
            GCookie.Key other = cookie.getKey();
            return key.getName().equals(other.getName());
          }
        }
        // A full key conflicts with same-name partial keys and with an
        // exactly equal full key.
        : new Predicate<GCookie>() {
          @Override
          public boolean apply(GCookie cookie) {
            GCookie.Key other = cookie.getKey();
            return key.getName().equals(other.getName())
                && (other.isPartial() || key.equals(other));
          }
        };
  }

  // NOTE(review): the class doc promises expiry on iteration, but no expiry
  // happens here — confirm callers invoke expireCookies() explicitly.
  @Override
  public Iterator<GCookie> iterator() {
    return map.values().iterator();
  }

  // NOTE(review): equals() accepts any Iterable and compares as an unordered
  // set, which is asymmetric w.r.t. List/Set equals contracts — confirm this
  // is intentional before relying on it.
  @Override
  public boolean equals(Object object) {
    if (object == this) { return true; }
    if (!(object instanceof Iterable<?>)) { return false; }
    return ImmutableSet.copyOf(map.values()).equals(ImmutableSet.copyOf((Iterable<?>) object));
  }

  @Override
  public int hashCode() {
    return ImmutableSet.copyOf(map.values()).hashCode();
  }

  /** Removes every cookie expired as of {@code timeStamp} (epoch millis). */
  @Override
  public void expireCookies(@Nonnegative long timeStamp) {
    Iterables.removeIf(this, GCookie.isExpiredPredicate(timeStamp));
  }

  /** Removes expired cookies using Joda's (mockable) current time. */
  @Override
  public void expireCookies() {
    expireCookies(DateTimeUtils.currentTimeMillis());
  }

  /** True if some stored cookie has this name (case-insensitive). */
  @Override
  public boolean contains(String name) {
    return get(name) != null;
  }

  /**
   * Returns a stored cookie whose name matches case-insensitively, or null.
   * Iteration order is hash-map order, so which of several same-named
   * cookies is returned is unspecified.
   */
  @Override
  @Nullable
  public GCookie get(String name) {
    Preconditions.checkNotNull(name);
    for (GCookie cookie : map.values()) {
      if (name.equalsIgnoreCase(cookie.getName())) {
        return cookie;
      }
    }
    return null;
  }

  @Override
  public boolean contains(GCookie.Key key) {
    return get(key) != null;
  }

  /** Returns the cookie stored under exactly this key, or null. */
  @Override
  @Nullable
  public GCookie get(GCookie.Key key) {
    Preconditions.checkNotNull(key);
    return map.get(key);
  }
}
|
class DatabaseHelper(context: Context) : SQLiteOpenHelper(context,
        DATABASE_NAME, null, 1) {

    // Schema: _id INTEGER PRIMARY KEY, name TEXT, age TEXT.
    // Fix: the literal was previously split across two source lines inside
    // the quotes ("INTEGER PRIMARY\nKEY"), which is invalid Kotlin.
    private val SQL_CREATE_ENTRIES =
        "CREATE TABLE " + DatabaseContract.TABLE_NAME + " (" +
                DatabaseContract._ID + " INTEGER PRIMARY KEY," +
                DatabaseContract.COLUMN_NAME + " TEXT," +
                DatabaseContract.COLUMN_AGE + " TEXT)"

    override fun onCreate(db: SQLiteDatabase) {
        db.execSQL(SQL_CREATE_ENTRIES)
    }

    override fun onUpgrade(db: SQLiteDatabase, oldVersion: Int,
                           newVersion: Int) {
        // Destructive upgrade: drop the old table and recreate it.
        db.execSQL("DROP TABLE IF EXISTS " + DatabaseContract.TABLE_NAME)
        onCreate(db)
    }

    // NOTE(review): stub — always returns 0. Age is stored as TEXT, so the
    // eventual implementation will need a CAST in the AVG query.
    fun getAvgAge(): Int {
        var avgAge: Int = 0
        //GET AVG AGE
        return avgAge
    }
}
/**
 * Endless scrolling for search results
 * https://stackoverflow.com/a/4842226/3549270
 */
export default class EndlessScrolling {
    constructor($button) {
        // Start loading once the viewport bottom is within this many pixels
        // of the document end.
        this.loadFurtherHeight = 500;
        this.$button = $button;
        this.reset();
        this.$target = $("#endless-scroll-target");
        this.$button.click(this.activate.bind(this));
    }

    /** Re-bind to a freshly rendered "load more" button. */
    retarget($newBtn) {
        this.$button = $newBtn;
        this.$button.click(this.activate.bind(this));
    }

    /** Hide the button and switch to scroll-driven loading. */
    activate() {
        this.$button.hide();
        this.isActive = true;
        this.checkAndLoad();
        this.scrollListener();
    }

    /** Load the next page when near the bottom and not already loading. */
    checkAndLoad() {
        if (this.isLoading || !this.isActive) {
            return;
        }
        if ($(window).scrollTop() >= $(document).height() - $(window).height() - this.loadFurtherHeight) {
            this.isLoading = true;
            // Paginate by the number of items already rendered.
            let url = this.$button.data("url") + "?after=" + this.$target.find("> li").length;
            $.get(url, (data) => {
                let $data = $(data["results"]);
                if ($data.length > 0) {
                    // Fix: append to the cached target instead of re-querying
                    // the "#endless-scroll-target" selector a second time.
                    this.$target.append($data.find("> li"));
                } else {
                    // No more results: stop listening for scroll events.
                    this.isActive = false;
                }
                this.isLoading = false;
            });
        }
    }

    /** One-shot scroll handler that re-arms itself (100 ms throttle). */
    scrollListener() {
        $(window).one("scroll", () => {
            this.checkAndLoad();
            if (this.isActive) {
                setTimeout(this.scrollListener.bind(this), 100);
            }
        });
    }

    // Called from external sources like FacettedSearch.js
    reset() {
        this.isActive = false;
        this.isLoading = false;
        this.$button.show();
    }
}
|
# Consume 12 messages of 'example-topic' from the beginning, printing each
# record's key and value separated by '-'.
docker-compose exec broker kafka-console-consumer --topic example-topic --bootstrap-server broker:9092 \
  --from-beginning \
  --property print.key=true \
  --property key.separator="-" \
  --max-messages 12
<!DOCTYPE html>
<html lang="en">
  <head>
    <!-- Declare the charset so the © glyph renders correctly everywhere. -->
    <meta charset="utf-8">
    <title>My Webpage</title>
  </head>
  <body>
    <div id="header">
      <h1>Welcome to my Webpage!</h1>
    </div>
    <div id="body">
      <p>Hello! This is the body section of my webpage.</p>
    </div>
    <div id="footer">
      <p>Copyright &copy; 2020</p>
    </div>
  </body>
</html>
const admin = require('firebase-admin');
const utils = require('./utils')
const readline = require('readline').createInterface({
input: process.stdin,
output: process.stdout
});
/**
* Make the backup (read collection & write to json, delete old backup, read json & save in backup collection)
* @param db
* @param myCollection
* @param backupCollection
*/
exports.backupCollection = function (db, myCollection, backupCollection) {
readline.question(`Are you sure you want to make a backup of ${myCollection} in the collection named ${backupCollection} ?[yes]/no : `, async function (answer) {
if (answer === 'no') {
console.log("Ok, bye.");
process.exit(1);
} else {
utils.backupCollectionToJson(db, myCollection).then(() => utils.deleteCollection(db, backupCollection).then(() => utils.saveBackupToFirestore(db, backupCollection)));
}
});
};
/**
* save collection in a json
* @param db
* @param collection
*/
exports.backupCollectionToJson = function (db, collection) {
readline.question(`Are you sure you want to make a backup of ${collection} in backup.json ?[yes]/no : `, async function (answer) {
if (answer === 'no') {
console.log("Ok, bye.");
process.exit(1);
} else {
await utils.backupCollectionToJson(db, collection);
}
});
};
/**
* delete backup collection
* @param db
* @param backupCollection
*/
exports.deleteBackupCollection = function (db, backupCollection) {
readline.question(`Are you sure you want to delete ${backupCollection} ?[yes]/no : `, async function (answer) {
if (answer === 'no') {
console.log("Ok, bye.");
process.exit(1);
} else {
await utils.deleteCollection(db, backupCollection);
}
});
};
/**
* copy json to database
* @param db
* @param collection
*/
exports.backupJsonToCollection = function (db, collection) {
readline.question(`Are you sure you want to copy backup.json in ${collection} ?[yes]/no : `, async function (answer) {
if (answer === 'no') {
console.log("Ok, bye.");
process.exit(1);
} else {
await utils.saveBackupToFirestore(db, collection);
}
});
}
/**
 * Initialize the Firebase Admin SDK using a serviceAccountKey.
 * @param key parsed service-account credentials object
 * @returns a Firestore handle with undefined-property filtering enabled
 */
exports.init = function (key) {
    try {
        admin.initializeApp({
            credential: admin.credential.cert(key)
        });
        console.log('Successfully connected to Firebase !');
    } catch (e) {
        // NOTE(review): e.code may be undefined for non-Firebase errors, and
        // the original stack is dropped here — confirm that's acceptable.
        throw new Error(`It seems there's a problem : ${e.code}`);
    }
    const db = admin.firestore();
    // Skip undefined fields instead of throwing when writing documents.
    db.settings({ignoreUndefinedProperties: true});
    return db;
};
|
package validate
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestInt_Set verifies that Int reports itself as "set" when any constraint
// flag (max, min, multiple-of) is enabled, and unset otherwise.
func TestInt_Set(t *testing.T) {
	assert.False(t, Int{}.Set())
	assert.True(t, Int{MaxSet: true}.Set())
	assert.True(t, Int{MinSet: true}.Set())
	assert.True(t, Int{MultipleOfSet: true}.Set())
}
// TestInt_Setters is a table test asserting that each setter populates both
// the value field and its corresponding *Set (and *Exclusive) flags.
func TestInt_Setters(t *testing.T) {
	for _, tc := range []struct {
		do       func(*Int)
		expected Int
	}{
		{
			do: func(i *Int) {
				i.SetMultipleOf(10)
			},
			expected: Int{
				MultipleOf:    10,
				MultipleOfSet: true,
			},
		},
		{
			do: func(i *Int) {
				i.SetExclusiveMaximum(10)
			},
			expected: Int{
				Max:          10,
				MaxExclusive: true,
				MaxSet:       true,
			},
		},
		{
			do: func(i *Int) {
				i.SetExclusiveMinimum(10)
			},
			expected: Int{
				Min:          10,
				MinExclusive: true,
				MinSet:       true,
			},
		},
		{
			do: func(i *Int) {
				i.SetMaximum(10)
			},
			expected: Int{
				Max:    10,
				MaxSet: true,
			},
		},
		{
			do: func(i *Int) {
				i.SetMinimum(10)
			},
			expected: Int{
				Min:    10,
				MinSet: true,
			},
		},
	} {
		// Each setter starts from a zero Int so the expected struct is exact.
		var r Int
		tc.do(&r)
		assert.Equal(t, tc.expected, r)
	}
}
// TestInt_Validate is a table test covering inclusive/exclusive bounds and
// the multiple-of constraint, including each constraint's failure case.
func TestInt_Validate(t *testing.T) {
	for _, tc := range []struct {
		Name      string
		Validator Int
		Value     int64
		Valid     bool
	}{
		// A zero validator imposes no constraints.
		{Name: "Zero", Valid: true},
		{
			Name:      "MaxOk",
			Validator: Int{Max: 10, MaxSet: true},
			Value:     10,
			Valid:     true,
		},
		{
			Name:      "MaxErr",
			Validator: Int{Max: 10, MaxSet: true},
			Value:     11,
			Valid:     false,
		},
		{
			Name:      "MaxExclErr",
			Validator: Int{Max: 10, MaxSet: true, MaxExclusive: true},
			Value:     10,
			Valid:     false,
		},
		{
			Name:      "MinOk",
			Validator: Int{Min: 10, MinSet: true},
			Value:     10,
			Valid:     true,
		},
		{
			Name:      "MinErr",
			Validator: Int{Min: 10, MinSet: true},
			Value:     9,
			Valid:     false,
		},
		{
			Name:      "MinExclErr",
			Validator: Int{Min: 10, MinSet: true, MinExclusive: true},
			Value:     10,
			Valid:     false,
		},
		{
			Name:      "MultipleOfOk",
			Validator: Int{MultipleOf: 2, MultipleOfSet: true},
			Value:     8,
			Valid:     true,
		},
		{
			Name:      "MultipleOfErr",
			Validator: Int{MultipleOf: 2, MultipleOfSet: true},
			Value:     13,
			Valid:     false,
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			// Validate returns nil on success; reduce to a bool for comparison.
			valid := tc.Validator.Validate(tc.Value) == nil
			assert.Equal(t, tc.Valid, valid, "%v: %+v",
				tc.Validator,
				tc.Value,
			)
		})
	}
}
|
<reponame>olbjan/home-assistant-1
"""Helpers to help coordinate updates."""
import asyncio
from datetime import datetime, timedelta
import logging
from time import monotonic
from typing import Any, Awaitable, Callable, List, Optional
import aiohttp
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .debounce import Debouncer
# Defaults for the request-refresh debouncer: batch requests arriving within
# a 10 s cooldown, firing the first one immediately.
REQUEST_REFRESH_DEFAULT_COOLDOWN = 10
REQUEST_REFRESH_DEFAULT_IMMEDIATE = True
class UpdateFailed(Exception):
    """Raised when an update has failed.

    Raise this from an update method to report a fetch failure without
    crashing the coordinator; async_refresh catches it and logs once.
    """
class DataUpdateCoordinator:
    """Class to manage fetching data from single endpoint.

    Polls ``update_method`` every ``update_interval`` while at least one
    listener is registered, and notifies listeners after each refresh.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        logger: logging.Logger,
        *,
        name: str,
        update_interval: timedelta,
        update_method: Optional[Callable[[], Awaitable]] = None,
        request_refresh_debouncer: Optional[Debouncer] = None,
    ):
        """Initialize global data updater."""
        self.hass = hass
        self.logger = logger
        self.name = name
        self.update_method = update_method
        self.update_interval = update_interval

        # Latest fetched data; None until the first successful refresh.
        self.data: Optional[Any] = None

        self._listeners: List[CALLBACK_TYPE] = []
        self._unsub_refresh: Optional[CALLBACK_TYPE] = None
        self._request_refresh_task: Optional[asyncio.TimerHandle] = None
        # Tracks success of the most recent refresh; errors are only logged
        # on the success -> failure transition to avoid log spam.
        self.last_update_success = True

        # Allow callers to supply their own debouncer; either way its
        # function is forced to async_refresh.
        if request_refresh_debouncer is None:
            request_refresh_debouncer = Debouncer(
                hass,
                logger,
                cooldown=REQUEST_REFRESH_DEFAULT_COOLDOWN,
                immediate=REQUEST_REFRESH_DEFAULT_IMMEDIATE,
                function=self.async_refresh,
            )
        else:
            request_refresh_debouncer.function = self.async_refresh

        self._debounced_refresh = request_refresh_debouncer

    @callback
    def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]:
        """Listen for data updates."""
        schedule_refresh = not self._listeners

        self._listeners.append(update_callback)

        # This is the first listener, set up interval.
        if schedule_refresh:
            self._schedule_refresh()

        @callback
        def remove_listener() -> None:
            """Remove update listener."""
            self.async_remove_listener(update_callback)

        return remove_listener

    @callback
    def async_remove_listener(self, update_callback: CALLBACK_TYPE) -> None:
        """Remove data update."""
        self._listeners.remove(update_callback)

        # Stop polling once the last listener is gone.
        if not self._listeners and self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

    @callback
    def _schedule_refresh(self) -> None:
        """Schedule a refresh."""
        # Cancel any previously scheduled refresh before re-arming.
        if self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

        # We _floor_ utcnow to create a schedule on a rounded second,
        # minimizing the time between the point and the real activation.
        # That way we obtain a constant update frequency,
        # as long as the update process takes less than a second
        self._unsub_refresh = async_track_point_in_utc_time(
            self.hass,
            self._handle_refresh_interval,
            utcnow().replace(microsecond=0) + self.update_interval,
        )

    async def _handle_refresh_interval(self, _now: datetime) -> None:
        """Handle a refresh interval occurrence."""
        self._unsub_refresh = None
        await self.async_refresh()

    async def async_request_refresh(self) -> None:
        """Request a refresh.

        Refresh will wait a bit to see if it can batch them.
        """
        await self._debounced_refresh.async_call()

    async def _async_update_data(self) -> Optional[Any]:
        """Fetch the latest data from the source."""
        if self.update_method is None:
            raise NotImplementedError("Update method not implemented")
        return await self.update_method()

    async def async_refresh(self) -> None:
        """Refresh data."""
        # Cancel both the scheduled interval refresh and any pending
        # debounced request: this call supersedes them.
        if self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

        self._debounced_refresh.async_cancel()

        try:
            # `start` is assigned first, so it is always bound in `finally`.
            start = monotonic()
            self.data = await self._async_update_data()

        except asyncio.TimeoutError:
            if self.last_update_success:
                self.logger.error("Timeout fetching %s data", self.name)
                self.last_update_success = False

        except aiohttp.ClientError as err:
            if self.last_update_success:
                self.logger.error("Error requesting %s data: %s", self.name, err)
                self.last_update_success = False

        except UpdateFailed as err:
            if self.last_update_success:
                self.logger.error("Error fetching %s data: %s", self.name, err)
                self.last_update_success = False

        except NotImplementedError as err:
            # Programming error (no update method): propagate.
            raise err

        except Exception as err:  # pylint: disable=broad-except
            self.last_update_success = False
            self.logger.exception(
                "Unexpected error fetching %s data: %s", self.name, err
            )

        else:
            if not self.last_update_success:
                self.last_update_success = True
                self.logger.info("Fetching %s data recovered", self.name)

        finally:
            self.logger.debug(
                "Finished fetching %s data in %.3f seconds",
                self.name,
                monotonic() - start,
            )
            # Keep polling while anyone is listening, then fan out the result.
            self._schedule_refresh()

        for update_callback in self._listeners:
            update_callback()
|
export class SaveForLaterJourneyRequest{
outwardfaregroupid : number;
outwardserviceid : number;
returnfaregroupid : number;
returnserviceid : number;
outwardloyaltypointcost : number;
returnloyaltypointcost : number;
sessionid : string;
} |
require 'portal_adventure/universe'
# Specs for PortalAdventure::Universe: constructor wiring and forwarding of
# the `look` command to the active character.
RSpec.describe PortalAdventure::Universe do
  # Verified double standing in for the starting character; `look` is stubbed
  # so `have_received(:look)` can assert on it below.
  let(:default_player_character) {
    instance_double('PortalAdventure::PlayerCharacter', look: nil)
  }

  subject {
    described_class.new(starting_character: default_player_character)
  }

  it 'sets the active character to a new PlayerCharacter by default' do
    expect(subject.active_character).to eq(default_player_character)
  end

  describe '#look' do
    it 'forwards the look command to the active character' do
      output_target = instance_double('PortalAdventure::PlayerInterface')
      subject.look(output_target: output_target)
      expect(subject.active_character).to have_received(:look)
        .with(output_target: output_target)
    end
  end
end
|
<gh_stars>0
package transaction
//go:generate stringer -type=WitnessScope -output=witness_scope_string.go
import (
"encoding/json"
"fmt"
"strings"
)
// WitnessScope represents set of witness flags for Transaction signer.
// Note that FeeOnly is the zero value, so a zero WitnessScope means FeeOnly.
type WitnessScope byte

const (
	// FeeOnly is only valid for a sender, it can't be used during the execution.
	FeeOnly WitnessScope = 0
	// CalledByEntry means that this condition must hold: EntryScriptHash == CallingScriptHash.
	// No params is needed, as the witness/permission/signature given on first invocation will
	// automatically expire if entering deeper internal invokes. This can be default safe
	// choice for native NEO/GAS (previously used on Neo 2 as "attach" mode).
	CalledByEntry WitnessScope = 0x01
	// CustomContracts define custom hash for contract-specific.
	CustomContracts WitnessScope = 0x10
	// CustomGroups define custom pubkey for group members.
	CustomGroups WitnessScope = 0x20
	// Global allows this witness in all contexts (default Neo2 behavior).
	// This cannot be combined with other flags.
	Global WitnessScope = 0x80
)
// ScopesFromString converts string of comma-separated scopes to a set of scopes
// (case-sensitive). String can combine several scopes, e.g. be any of: 'Global',
// 'CalledByEntry,CustomGroups' etc. In case of an empty string an error will be
// returned.
func ScopesFromString(s string) (WitnessScope, error) {
	var result WitnessScope
	scopes := strings.Split(s, ",")
	for i, scope := range scopes {
		scopes[i] = strings.TrimSpace(scope)
	}
	dict := map[string]WitnessScope{
		Global.String():          Global,
		CalledByEntry.String():   CalledByEntry,
		CustomContracts.String(): CustomContracts,
		CustomGroups.String():    CustomGroups,
		FeeOnly.String():         FeeOnly,
	}
	// Global cannot be combined with any other scope. Track both conditions
	// so the check is independent of the order scopes appear in; the previous
	// version only rejected combinations where Global came first (e.g.
	// "CalledByEntry,Global" was wrongly accepted).
	var isGlobal, hasOther bool
	for _, scopeStr := range scopes {
		scope, ok := dict[scopeStr]
		if !ok {
			return result, fmt.Errorf("invalid witness scope: %v", scopeStr)
		}
		if scope == Global {
			isGlobal = true
		} else {
			hasOther = true
		}
		result |= scope
	}
	if isGlobal && hasOther {
		return result, fmt.Errorf("Global scope can not be combined with other scopes")
	}
	return result, nil
}
// scopesToString converts witness scope to it's string representation. It uses
// `, ` to separate scope names.
func scopesToString(scopes WitnessScope) string {
	// Global (and plain FeeOnly) render as a single name.
	if scopes&Global != 0 || scopes == FeeOnly {
		return scopes.String()
	}
	var parts []string
	for _, flag := range []WitnessScope{CalledByEntry, CustomContracts, CustomGroups} {
		if scopes&flag != 0 {
			parts = append(parts, flag.String())
		}
	}
	return strings.Join(parts, ", ")
}
// MarshalJSON implements json.Marshaler interface.
// The scope set is serialized as one quoted string, e.g. "CalledByEntry, CustomGroups".
func (s WitnessScope) MarshalJSON() ([]byte, error) {
	return []byte(`"` + scopesToString(s) + `"`), nil
}
// UnmarshalJSON implements json.Unmarshaler interface.
// It accepts the same comma-separated form produced by MarshalJSON.
func (s *WitnessScope) UnmarshalJSON(data []byte) error {
	var js string
	if err := json.Unmarshal(data, &js); err != nil {
		return err
	}
	scopes, err := ScopesFromString(js)
	if err != nil {
		return err
	}
	*s = scopes
	return nil
}
|
<reponame>hunter-heidenreich/pyconversations<filename>tests/feature_extraction/test_extractor.py<gh_stars>0
from datetime import datetime as dt
from itertools import product
import numpy as np
import pytest
from pyconversations.convo import Conversation
from pyconversations.feature_extraction import ConversationVectorizer
from pyconversations.feature_extraction import PostVectorizer
from pyconversations.feature_extraction import UserVectorizer
from pyconversations.message import Tweet
@pytest.fixture
def mock_tweet():
    """A single reply tweet containing a mention, emoji, URL, and hashtags."""
    return Tweet(
        uid=91242213123121,
        text='@Twitter check out this 😏 https://www.twitter.com/ #crazy #link',
        author='apnews',
        reply_to={3894032234},
        created_at=dt(year=2020, month=12, day=12, hour=12, minute=54, second=12)
    )
@pytest.fixture
def mock_convo(mock_tweet):
    """A two-post conversation: mock_tweet replying to an earlier tweet."""
    cx = Conversation(convo_id='TEST_POST_IN_CONV')
    cx.add_post(mock_tweet)
    # The parent post that mock_tweet's reply_to points at (uid 3894032234).
    cx.add_post(Tweet(
        uid=3894032234,
        text='We are shutting down Twitter',
        author='Twitter',
        created_at=dt(year=2020, month=12, day=12, hour=12, minute=54, second=2)
    ))
    return cx
@pytest.fixture
def all_post_vecs():
    """One PostVectorizer per supported normalization mode."""
    return [
        PostVectorizer(normalization=norm)
        for norm in (None, 'minmax', 'mean', 'standard')
    ]
@pytest.fixture
def all_conv_vecs():
    """One ConversationVectorizer per supported normalization mode."""
    return [
        ConversationVectorizer(normalization=norm)
        for norm in (None, 'minmax', 'mean', 'standard')
    ]
@pytest.fixture
def all_user_vecs():
    """One UserVectorizer per supported normalization mode."""
    return [
        UserVectorizer(normalization=norm)
        for norm in (None, 'minmax', 'mean', 'standard')
    ]
def test_post_fit_no_args():
    """fit() rejects a None input with ValueError."""
    v = PostVectorizer()
    with pytest.raises(ValueError):
        v.fit(None)
def test_post_transform_no_args():
    """transform() rejects a None input with ValueError."""
    v = PostVectorizer()
    with pytest.raises(ValueError):
        v.transform(None)
def test_post_vec_with_posts(mock_tweet, all_post_vecs):
    """Vectorizing bare posts yields one 18-feature row per post, for every normalization."""
    xs = [mock_tweet]
    for v in all_post_vecs:
        v.fit(xs)
        vs = v.transform(xs)
        assert type(vs) == np.ndarray
        assert vs.shape[0] == 1
        assert vs.shape[1] == 18
def test_post_vec_with_convs(mock_convo, all_post_vecs):
    """With conversation context, each post gets a richer 63-feature row."""
    xs = [mock_convo]
    for v in all_post_vecs:
        v.fit(xs)
        vs = v.transform(xs)
        assert type(vs) == np.ndarray
        assert vs.shape[0] == 2
        assert vs.shape[1] == 63
def test_fit_transform_invariance(mock_convo):
    """fit() then transform() equals fit_transform() on the same conversation."""
    v = PostVectorizer()
    v.fit(mock_convo)
    xs = v.transform(mock_convo)

    v = PostVectorizer()
    xs_ = v.fit_transform(mock_convo)
    assert (xs == xs_).all()
def test_conv_convs_invariance(mock_convo):
    """A conversation and a singleton list of it vectorize identically."""
    v = PostVectorizer()
    xs = v.fit_transform([mock_convo])

    v = PostVectorizer()
    xs_ = v.fit_transform(mock_convo)
    assert (xs == xs_).all()
def test_post_invalid(mock_convo):
    """An unknown normalization name fails: ValueError on fit, KeyError on transform."""
    v = PostVectorizer(normalization='akdfhg;asdhgsd')
    with pytest.raises(ValueError):
        v.fit(mock_convo)

    # NOTE(review): transform surfaces as KeyError rather than ValueError —
    # presumably because the fit stats were never populated; confirm intended.
    v = PostVectorizer(normalization='akdfhg;asdhgsd')
    with pytest.raises(KeyError):
        v.transform(mock_convo)
def test_conversation_vec_conv(mock_convo, all_conv_vecs):
    """A single conversation vectorizes to an ndarray for every normalization."""
    for v in all_conv_vecs:
        xs = v.fit_transform(mock_convo)
        assert type(xs) == np.ndarray
def test_conversation_vec_convs(mock_convo, all_conv_vecs):
    """A list of conversations also vectorizes to an ndarray."""
    for v in all_conv_vecs:
        xs = v.fit_transform([mock_convo])
        assert type(xs) == np.ndarray
def test_conversation_vec_fail():
    """ConversationVectorizer rejects None for both fit() and transform()."""
    with pytest.raises(ValueError):
        ConversationVectorizer().fit(None)

    with pytest.raises(ValueError):
        ConversationVectorizer().transform(None)
def test_user_vec_conv(mock_convo, all_user_vecs):
    """One 321-feature row per distinct author when given a single conversation."""
    for v in all_user_vecs:
        xs = v.fit_transform(mock_convo)
        assert type(xs) == np.ndarray
        assert xs.shape[0] == 2
        assert xs.shape[1] == 321
def test_user_vec_convs(mock_convo, all_user_vecs):
    """List input yields 320 features — one fewer than the single-convo case, as asserted."""
    for v in all_user_vecs:
        xs = v.fit_transform([mock_convo])
        assert type(xs) == np.ndarray
        assert xs.shape[0] == 2
        assert xs.shape[1] == 320
def test_user_vec_fail():
    """UserVectorizer rejects None for both fit() and transform()."""
    with pytest.raises(ValueError):
        UserVectorizer().fit(None)

    with pytest.raises(ValueError):
        UserVectorizer().transform(None)
|
import React, { Component } from 'react'
import { connect } from 'react-redux'
import PersonalInformationForm from 'components/forms/PersonalInformation'
import {
updateForm,
updateFormError,
resetForm,
submitFormData
} from 'actions/account/PersonalInformation'
import { personalInformationValidators } from 'utils/validation/PersonalInformation'
import processMediaPayload from 'utils/mediapayload'
class PersonalInformationFormContainer extends Component {
constructor (props) {
super(props)
}
componentWillUnmount () {
this.props.clearForm()
}
render () {
return <PersonalInformationForm {...this.props} />
}
}
// Maps the in-progress form values + validation errors (account slice) and
// the authenticated user's saved details (auth slice, used as initial
// values) into the form's props.
const mapStateToProps = (state, ownProps) => {
  const {
    account,
    auth
  } = state
  const {
    firstname,
    surname,
    username,
    birthDate,
    location,
    avatarImage,
    errors
  } = account.personalInformation
  // Rename the saved details so they can coexist with the live form values.
  const {
    firstname: initialFirstname,
    surname: initialSurname,
    username: initialUsername,
    birthDate: initialBirthday,
    location: initialLocation,
    avatarImage: initialAvatarImage
  } = auth.details
  return {
    values: {
      firstname,
      surname,
      username,
      birthDate,
      location,
      avatarImage
    },
    errors,
    validators: personalInformationValidators,
    initialValues: {
      firstname: initialFirstname,
      surname: initialSurname,
      username: initialUsername,
      birthDate: initialBirthday,
      location: initialLocation,
      avatarImage: initialAvatarImage
    }
  }
}
// Provides the form's action handlers: field updates, per-field validation
// errors, multipart submission, and reset.
const mapDispatchToProps = (dispatch, ownProps) => {
  return {
    update: (name, value) => {
      dispatch(updateForm(name, value))
    },
    updateErrors: (errors, name) => {
      dispatch(updateFormError(errors, name))
    },
    submitForm: (values) => {
      // Constructs to FormData
      const {
        avatarImage,
        ...rest
      } = values
      let payload = {
        ...rest
      }
      // Only send avatarImage if it's an input file format and not a string
      // (a string means the stored URL of an unchanged avatar).
      if (typeof avatarImage !== 'string') {
        payload['avatarImage'] = avatarImage
      }
      const data = processMediaPayload({
        ...payload
      })
      // Returned so callers can chain on the submission promise.
      return dispatch(submitFormData(data))
    },
    clearForm: () => {
      dispatch(resetForm())
    }
  }
}
// Wire the container into the Redux store.
export default connect(
  mapStateToProps,
  mapDispatchToProps
)(PersonalInformationFormContainer)
|
#!/bin/bash
# Batch driver: run the sketch solver (angelic-model backend) over every
# benchmark under ./model_usage, 20 trials each, capturing per-trial logs and
# echoing the iteration count and total time scraped from each log.
ms="/afs/csail.mit.edu/u/r/rohitsingh/public_html/sketch4jan/sketch-frontend/msketch --fe-cegis-path /afs/csail.mit.edu/u/r/rohitsingh/public_html/sketch4jan/sketch-backend/src/SketchSolver/cegis"
cd /afs/csail.mit.edu/u/r/rohitsingh/public_html/sketch4jan/Dropbox/models
for tr in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
    for i in `ls ./model_usage`; do
        cd ./model_usage/$i
        # 10 s solver timeout; stdout+stderr go to a per-benchmark/trial log.
        $ms --slv-timeout 10 model.sk --beopt:simplifycex NOSIM --be:angelic-model -V 10 &> "../../outputs/$i.model_angelic_trial$tr.txt";
        itr=`grep 'GOT THE CORRECT' "../../outputs/$i.model_angelic_trial$tr.txt" | awk '{print $6}'`;
        time=`grep 'Total time' "../../outputs/$i.model_angelic_trial$tr.txt" | awk '{print $4}'`;
        echo "$i iter=$itr time=$time";
        cd ../../
    done
done
|
<gh_stars>0
package services
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/stretchr/testify/assert"
)
// TestSettigns exercises NewSettigns against yaml fixture files: a complete
// config, one with the destination bucket omitted, a missing file, and a
// malformed file.
// NOTE(review): "Settigns" looks like a typo for "Settings" carried over
// from the constructor's name; renaming would touch the package API.
func TestSettigns(t *testing.T) {
	t.Run("Valid yaml file", func(t *testing.T) {
		assert := assert.New(t)
		s, err := NewSettigns("testing1.yaml")
		assert.NoError(err)
		assert.NotNil(s)
		assert.NotNil(s.Source)
		assert.NotNil(s.Destination)
		assert.Equal(s.Source.BucketName, aws.String("srcBucket"))
		assert.Equal(s.Destination.BucketName, aws.String("dstBucket"))
		assert.Equal(s.Source.AWSProfile, "srcProfile")
		assert.Equal(s.Destination.AWSProfile, "dstProfile")
	})
	t.Run("Valid yaml file with out destination bucket", func(t *testing.T) {
		assert := assert.New(t)
		s, err := NewSettigns("testing2.yaml")
		assert.NoError(err)
		assert.NotNil(s)
		assert.NotNil(s.Source)
		assert.NotNil(s.Destination)
		assert.Equal(s.Source.BucketName, aws.String("srcBucket"))
		// Destination bucket is optional and stays nil when omitted.
		assert.Nil(s.Destination.BucketName)
		assert.Equal(s.Source.AWSProfile, "srcProfile")
		assert.Equal(s.Destination.AWSProfile, "dstProfile")
	})
	t.Run("Unable to open file", func(t *testing.T) {
		assert := assert.New(t)
		s, err := NewSettigns("bad file")
		assert.Error(err)
		assert.Nil(s)
	})
	t.Run("Bad yaml file", func(t *testing.T) {
		assert := assert.New(t)
		s, err := NewSettigns("bad.yaml")
		assert.Error(err)
		assert.Nil(s)
	})
}
|
<gh_stars>1-10
/*
*
*/
package net.community.chest.net.rmi.test;
import java.io.BufferedReader;
import java.io.PrintStream;
import java.rmi.Remote;
import java.rmi.RemoteException;
import java.rmi.registry.Registry;
import java.util.List;
import java.util.Map;
import net.community.chest.net.rmi.RMIUtils;
/**
 * <P>Copyright GPLv2</P>
 *
 * Interactive driver for a trivial RMI service: the server side registers a
 * printer stub in an RMI registry, and the client side sends it lines of
 * text until the user quits.
 *
 * @author <NAME>.
 * @since May 25, 2009 11:37:35 AM
 */
public class SimpleRMITester extends BaseRMITester {
    public SimpleRMITester ()
    {
        super();
    }

    /** The remote interface: a single remote println call. */
    public interface RMITester extends Remote {
        void println (String s) throws RemoteException;
    }

    /** Server-side implementation echoing each message to a PrintStream. */
    public static class RMITesterServerStub implements RMITester {
        private PrintStream _out;
        public PrintStream getPrintStream ()
        {
            return _out;
        }

        public void setPrintStream (PrintStream out)
        {
            _out = out;
        }

        public RMITesterServerStub (PrintStream out)
        {
            _out = out;
        }

        public RMITesterServerStub ()
        {
            this(null);
        }
        /*
         * @see net.community.chest.net.rmi.test.SimpleRMITest.RMITester#println(java.lang.String)
         */
        @Override
        public void println (String s)
        {
            final PrintStream out=getPrintStream();
            if (out != null)
                out.println("==> " + s);
            // A quit message shuts down the entire server JVM.
            if (isQuit(s))
                System.exit(0);
        }
    }
    /*
     * @see net.community.chest.net.rmi.test.BaseRMITest#testRMIClient(java.io.PrintStream, java.io.BufferedReader, java.util.List)
     */
    @Override
    public void testRMIClient (PrintStream out, BufferedReader in, List<String> args) throws Exception
    {
        // Resolve the registry and lookup name interactively (or from args);
        // bail out quietly if the user cancels either prompt.
        final Map.Entry<Integer,Registry>    rp=inputRegistry(out, in, 0, args);
        final Registry    r=(null == rp) ? null : rp.getValue();
        if (null == r)
            return;
        final String    n=inputLookupName(out, in, 1, args);
        if (isQuit(n))
            return;
        final RMITester    tester=RMIUtils.lookup(r, RMITester.class, n);
        // Pump user-entered lines to the remote printer until "quit".
        for ( ; ; )
        {
            final String    s=getval(out, in, "value to send (or Quit)");
            tester.println(s);
            if (isQuit(s)) break;
        }
    }
    /*
     * @see net.community.chest.net.rmi.test.BaseRMITest#testRMIServer(java.io.PrintStream, java.io.BufferedReader, java.util.List)
     */
    @Override
    public void testRMIServer (PrintStream out, BufferedReader in, List<String> args) throws Exception
    {
        final Map.Entry<Integer,Registry>    rp=inputRegistry(out, in, 0, args);
        final Registry    r=(null == rp) ? null : rp.getValue();
        if (null == r)
            return;
        final String    n=inputLookupName(out, in, 1, args);
        if (isQuit(n))
            return;
        final Integer    p=rp.getKey();
        // Bind (or rebind) the stub under the requested name/port.
        final Remote    rem=RMIUtils.ensureBinding(r, (null == p) ? 0 : p.intValue(), n, new RMITesterServerStub(out));
        out.println("registered " + rem + " at port=" + p + " under name=" + n);
    }
}
|
<reponame>edjee/osconfig
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
import (
"context"
"io/ioutil"
"os/exec"
"path/filepath"
)
// pkgs is a canned package-name list used as fixture data by tests in this package.
var pkgs = []string{"pkg1", "pkg2"}

// testCtx is a shared background context for test invocations.
var testCtx = context.Background()
func getMockRun(content []byte, err error) func(_ context.Context, cmd *exec.Cmd) ([]byte, error) {
return func(_ context.Context, cmd *exec.Cmd) ([]byte, error) {
return content, err
}
}
// helperLoadBytes loads a named fixture from the package-relative testdata
// directory. On read failure it returns a nil slice (never partial data)
// together with the error.
// TODO: move this to a common helper package
func helperLoadBytes(name string) ([]byte, error) {
	fixture := filepath.Join("testdata", name) // relative to the package directory
	data, readErr := ioutil.ReadFile(fixture)
	if readErr != nil {
		return nil, readErr
	}
	return data, nil
}
|
#!/usr/bin/env bash
# Installs the Rust toolchain via rustup's official bootstrap script.
# NOTE(review): piping a remote script straight into `sh` executes unreviewed
# code; consider downloading the script and inspecting/checksumming it first.
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
<reponame>JacobTheEvans/code-show
// Editor module: routing, cookies, Ace editor integration, and clipboard
// support (ZeroClipboard via ngClipboard).
var app = angular.module("main.editor",["ngCookies", "ngRoute","ui.ace","ngClipboard"]);

// Route /editor/:ind to the editor template; :ind identifies the editor room.
app.config(["$routeProvider", function($routeProvider) {
    $routeProvider.when("/editor/:ind", {
        templateUrl: "/views/templates/editor.tpl.html",
        controller: "editorController"
    })
}]);

// ZeroClipboard needs its Flash shim; point ngClipboard at the CDN copy.
app.config(['ngClipProvider', function(ngClipProvider) {
    ngClipProvider.setPath("//cdnjs.cloudflare.com/ajax/libs/zeroclipboard/2.1.6/ZeroClipboard.swf");
}]);
// Controller for a shared code-editor room: syncs code and chat over a
// socket (via the `io` service) and resolves the user's privilege level.
app.controller("editorController", ["$scope", "io", "$cookies", "codeMemoryStore", "$routeParams", "privilege", function($scope,io,$cookies,codeMemoryStore,$routeParams,privilege) {
    // NOTE(review): hard-coded host — breaks outside local dev; consider
    // deriving from $location or configuration.
    $scope.domain = "localhost:8080";
    $scope.ind = $routeParams.ind;           // room identifier from the route
    $scope.link = $scope.domain + "/#/editor/" + $scope.ind;  // shareable URL
    $scope.code = "";
    // Privilege callback: "owner" response grants edit privileges.
    $scope.setPrivilege = function(response) {
        if(response.data == "owner") {
            $scope.priv = true;
        } else {
            $scope.priv = false;
        }
    };
    $scope.logError = function(response) {
        console.log(response.data);
    };
    // Broadcast every local edit and cache it in codeMemoryStore.
    $scope.$watch('code', function() {
        io.emitCode(socket,$scope.code,$cookies.get("UserToken"),$scope.ind);
        codeMemoryStore.setCode($scope.code);
    });
    $scope.messages = [];
    // Append an incoming chat message and scroll the chat box to the bottom.
    // $apply is needed because this fires from a socket event, outside Angular.
    $scope.addMessage = function(msg) {
        var newMessages = $scope.messages;
        newMessages.push(msg);
        $scope.messages = newMessages;
        $("#mes-box").animate({ scrollTop: $('#mes-box')[0].scrollHeight}, 1000);
        $scope.$apply()
    };
    $scope.sendMessage = function() {
        io.emitChat(socket,$scope.message,$cookies.get("UserToken"),$scope.ind);
        $scope.message = "";
    };
    // Replace the local buffer with code received from another client.
    $scope.setCode = function(code) {
        $scope.code = code;
        $scope.$apply();
    };
    // Wire up the socket: join the room, then subscribe to chat/code streams.
    var socket = io.connect();
    io.joinroom(socket,$scope.ind);
    io.getChat(socket,$scope.addMessage);
    io.getCode(socket,$scope.setCode);
    privilege.requestPriv($cookies.get("UserToken"),$scope.ind,$scope.setPrivilege,$scope.logError);
}]);
|
def bump_version(current_version, part):
    """Return ``current_version`` with one component incremented.

    Args:
        current_version: Dotted semver-style string, e.g. ``"1.2.3"``.
        part: Which component to bump: 0 = major (resets minor and patch),
            1 = minor (resets patch), 2 = patch. Any other value leaves the
            version unchanged (normalized back through ``int``).

    Returns:
        The updated ``"major.minor.patch"`` string.
    """
    major, minor, patch = (int(piece) for piece in current_version.split('.'))
    if part == 0:
        return f"{major + 1}.0.0"
    if part == 1:
        return f"{major}.{minor + 1}.0"
    if part == 2:
        return f"{major}.{minor}.{patch + 1}"
    return f"{major}.{minor}.{patch}"
package io.opensphere.core.util;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EventObject;
import java.util.List;
/**
 * Event describing a change to a list of elements. Callers may supply the
 * pre-change elements as well; when they are omitted, {@link #getPreviousElements()}
 * returns {@code null}.
 *
 * @param <E> the type of elements in the list
 */
public class ListDataEvent<E> extends EventObject
{
    /** The serialVersionUID. */
    private static final long serialVersionUID = 1L;

    /** Snapshot of the elements that changed. */
    private final List<? extends E> myChangedElements;

    /** Snapshot of the elements before the change, or {@code null} if not applicable. */
    private final List<? extends E> myPreviousElements;

    /**
     * Constructor taking only the changed elements.
     *
     * @param source the event source
     * @param changedElements the changed elements
     */
    public ListDataEvent(Object source, Collection<? extends E> changedElements)
    {
        super(source);
        this.myChangedElements = new ArrayList<>(changedElements);
        this.myPreviousElements = null;
    }

    /**
     * Constructor taking both the pre-change and changed elements.
     *
     * @param source the event source
     * @param previousElements the elements present before the change
     * @param changedElements the changed elements
     */
    public ListDataEvent(Object source, Collection<? extends E> previousElements, Collection<? extends E> changedElements)
    {
        super(source);
        this.myChangedElements = new ArrayList<>(changedElements);
        this.myPreviousElements = new ArrayList<>(previousElements);
    }

    /**
     * Constructor for a single changed element.
     *
     * @param source the event source
     * @param changedElement the changed element
     */
    public ListDataEvent(Object source, E changedElement)
    {
        super(source);
        this.myChangedElements = Collections.singletonList(changedElement);
        this.myPreviousElements = null;
    }

    /**
     * Constructor for a single element replaced by another.
     *
     * @param source the event source
     * @param previousElement the element present before the change
     * @param changedElement the changed element
     */
    public ListDataEvent(Object source, E previousElement, E changedElement)
    {
        super(source);
        this.myChangedElements = Collections.singletonList(changedElement);
        this.myPreviousElements = Collections.singletonList(previousElement);
    }

    /**
     * Gets the changed elements.
     *
     * @return the changed elements
     */
    public List<? extends E> getChangedElements()
    {
        return myChangedElements;
    }

    /**
     * Gets the previous elements before the change.
     *
     * @return the previous elements, or {@code null} if previous elements do
     *         not apply to this event
     */
    public List<? extends E> getPreviousElements()
    {
        return myPreviousElements;
    }
}
|
package org.jruby.ir.instructions;
import org.jruby.ir.*;
import org.jruby.ir.operands.Operand;
import org.jruby.ir.operands.WrappedIRClosure;
import org.jruby.ir.runtime.IRRuntimeHelpers;
import org.jruby.ir.transformations.inlining.CloneInfo;
import org.jruby.parser.StaticScope;
import org.jruby.runtime.Block;
import org.jruby.runtime.DynamicScope;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
/**
 * Instruction that registers an END block (a wrapped closure) with the
 * runtime so it runs at interpreter exit.
 */
public class RecordEndBlockInstr extends Instr implements FixedArityInstr {
    /** Scope in which the END block was declared. */
    private final IRScope declaringScope;

    public RecordEndBlockInstr(IRScope declaringScope, WrappedIRClosure endBlockClosure) {
        super(Operation.RECORD_END_BLOCK, new Operand[] { endBlockClosure });
        this.declaringScope = declaringScope;
    }

    public IRScope getDeclaringScope() {
        return declaringScope;
    }

    public WrappedIRClosure getEndBlockClosure() {
        // The closure is the sole operand (see constructor).
        return (WrappedIRClosure) operands[0];
    }

    @Override
    public boolean computeScopeFlags(IRScope scope) {
        // Presence of this instruction means the scope owns END blocks.
        scope.getFlags().add(IRFlags.HAS_END_BLOCKS);
        return true;
    }

    @Override
    public Instr clone(CloneInfo ii) {
        // SSS FIXME: Correct in all situations??
        final WrappedIRClosure clonedClosure =
                (WrappedIRClosure) getEndBlockClosure().cloneForInlining(ii);
        return new RecordEndBlockInstr(declaringScope, clonedClosure);
    }

    @Override
    public Object interpret(ThreadContext context, StaticScope currScope, DynamicScope currDynScope, IRubyObject self, Object[] temp) {
        final Block endBlock =
                (Block) getEndBlockClosure().retrieve(context, self, currScope, context.getCurrentScope(), temp);
        IRRuntimeHelpers.pushExitBlock(context, endBlock);
        return null;
    }

    @Override
    public void visit(IRVisitor visitor) {
        visitor.RecordEndBlockInstr(this);
    }
}
|
/*
Current Version: 2.5
Last updated: 8.28.2014
Character Sheet and Script Maintained by: <NAME>
    Current Version: https://github.com/Roll20/roll20-character-sheets/tree/master/StarWarsEdgeOfTheEmpire_Dice
    Development and Older Versions: https://github.com/dayst/StarWarsEdgeOfTheEmpire_Dice
Credits:
Original creator: <NAME>.
Helped with Dice specs: <NAME>. and Blake the Lake
Dice graphics hosted by Alicia G. at galacticcampaigns.com
Dice graphics borrowed from the awesome google+ hangouts EotE Dice App
Character Sheet and Advanced Dice Roller: <NAME>
Debugger: Arron
Initiative Roller: <NAME>.
Opposed Roller: <NAME>.
Skill Description by: Gribble - https://dl.dropboxusercontent.com/u/9077657/SW-EotE-Reference-Sheets.pdf
Critical Descriptions by: Gribble - https://dl.dropboxusercontent.com/u/9077657/SW-EotE-Reference-Sheets.pdf
API Chat Commands
Settings:
Log
* default: 'on' and 'single'
* Description: Sets the visual output in the chat window for the dice rolls
* Command: !eed log on|off|multi|single
Graphics
* default: 'on' and 'm'
* Description: Sets chat window dice output as graphic, small, medium, or large if "on" or as text if "off"
* Command: !eed graphics on|off|s|m|l
Test
* Description: Output every side of every die to the chat window
* !eed test
Roll:
Label
* default: null
* Description: set the skill name of the roll
* Command: !eed label(Name of Skill)
Initiative
* default: false
* Description: Set NPC/PC initiative true
* Command: !eed npcinit or pcinit and #b #g #y #blk #p #r #w
Skill
* default:
* Description: create the ability and proficiency dice for a skill check
* Command: !eed skill(char_value|skill_value)
Opposed
* default:
* Description: create the difficulty and challenge dice for an opposed skill check
* Command: !eed opposed(char_value|skill_value)
Dice
* default:
* Description: Loop thru the dice and adds or subtracts them from the dice object
* Command: !eed #g #y #b #blk #r #p #w
Upgrade
* default:
* Description: upgrades ability and difficulty dice
* Command: !eed upgrade(ability|#) or upgrade(difficulty|#)
Downgrade
* default:
* Description: downgrades proficiency and challenge dice
* Command: !eed downgrade(proficiency|#) or downgrade(challenge|#)
*/
// Root namespace for the Edge of the Empire dice roller.
var eote = {};

// Entry point: seed character defaults, ensure the GM dice pool character
// exists, then register chat-event handlers.
eote.init = function() {
    eote.setCharacterDefaults();
    eote.createGMDicePool();
    eote.events();
};
// Static configuration for the dice roller: runtime toggles, per-character
// defaults, dice-face image URLs, and the chat-command regexes.
eote.defaults = {
    // Runtime-tunable output settings (changed via !eed log / !eed graphics).
    globalVars : {
        diceLogChat : true,
        diceGraphicsChat : true,
        diceGraphicsChatSize : 30,//medium size
        diceTextResult : "",
        diceTextResultLog : "",
        diceGraphicResult : "",
        diceGraphicResultLog : "",
        diceTestEnabled : false,
        diceLogRolledOnOneLine : true
    },
    // Defaults stamped onto every character sheet at init.
    character : {
        attributes : [
            {
                name : "characterID",
                current : "UPDATES TO CURRENT ID",
                max : "",
                update : false
            }
        ],
        // NOTE(review): key misspells "abilities" — kept as-is since other
        // (unseen) code may reference it by this exact name.
        ablities : [],
    },
    // Hosted dice-face images, keyed by die type then face symbols
    // (A=advantage, S=success, T=threat, F=failure, L/D=light/dark).
    graphics : {
        SIZE : {
            SMALL : 20,
            MEDIUM : 30,
            LARGE : 40
        },
        ABILITY : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Ability/abilityBlank.png",
            A : "http://galacticcampaigns.com/images/EotE/Ability/abilityA.png",
            AA : "http://galacticcampaigns.com/images/EotE/Ability/abilityAA.png",
            S : "http://galacticcampaigns.com/images/EotE/Ability/abilityS.png",
            SA : "http://galacticcampaigns.com/images/EotE/Ability/abilitySA.png",
            SS : "http://galacticcampaigns.com/images/EotE/Ability/abilitySS.png"
        },
        BOOST : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Boost/boostBlank.png",
            A : "http://galacticcampaigns.com/images/EotE/Boost/boostA.png",
            AA : "http://galacticcampaigns.com/images/EotE/Boost/boostAA.png",
            S : "http://galacticcampaigns.com/images/EotE/Boost/boostS.png",
            SA : "http://galacticcampaigns.com/images/EotE/Boost/boostSA.png"
        },
        CHALLENGE : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeBlank.png",
            F : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeF.png",
            FF : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeFF.png",
            FT : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeFT.png",
            T : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeT.png",
            TT : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeTT.png",
            DESPAIR : "http://galacticcampaigns.com/images/EotE/Challenge/ChallengeDespair.png"
        },
        DIFFICULTY : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyBlank.png",
            F : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyF.png",
            FF : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyFF.png",
            FT : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyFT.png",
            T : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyT.png",
            TT : "http://galacticcampaigns.com/images/EotE/Difficulty/DifficultyTT.png"
        },
        FORCE : {
            D : "http://galacticcampaigns.com/images/EotE/Force/ForceD.png",
            DD : "http://galacticcampaigns.com/images/EotE/Force/ForceDD.png",
            L : "http://galacticcampaigns.com/images/EotE/Force/ForceL.png",
            LL : "http://galacticcampaigns.com/images/EotE/Force/ForceLL.png"
        },
        PROFICIENCY : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencyBlank.png",
            A : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencyA.png",
            S : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencyS.png",
            SA : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencySA.png",
            SS : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencySS.png",
            AA : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencyAA.png",
            TRIUMPH : "http://galacticcampaigns.com/images/EotE/Proficiency/ProficiencyTriumph.png"
        },
        SETBACK : {
            BLANK : "http://galacticcampaigns.com/images/EotE/Setback/SetBackBlank.png",
            F : "http://galacticcampaigns.com/images/EotE/Setback/SetBackF.png",
            T : "http://galacticcampaigns.com/images/EotE/Setback/SetBackT.png"
        },
        SYMBOLS : {
            A : "http://galacticcampaigns.com/images/EotE/Symbols/A.png",
            S : "http://galacticcampaigns.com/images/EotE/Symbols/S.png",
            T : "http://galacticcampaigns.com/images/EotE/Symbols/T.png",
            F : "http://galacticcampaigns.com/images/EotE/Symbols/F.png",
            TRIUMPH : "http://galacticcampaigns.com/images/EotE/Symbols/Triumph.png",
            DESPAIR: "http://galacticcampaigns.com/images/EotE/Symbols/Despair.png",
            L : "http://galacticcampaigns.com/images/EotE/Symbols/L.png",
            D : "http://galacticcampaigns.com/images/EotE/Symbols/D.png"
        }
    },
    // Chat-command parsers; eote.process.setup matches these in a fixed order.
    regex : {
        cmd : /!eed/,
        log : /log (on|multi|single|off)/,
        graphics : /graphics (on|off|s|m|l)/,
        test : /test/,
        resetdice : /(resetgmdice|resetdice)/,
        initiative : /\bnpcinit|\bpcinit/,
        characterID : /characterID\((.*?)\)/,
        label : /label\((.*?)\)/,
        skill : /skill\((.*?)\)/g,
        opposed : /opposed\((.*?)\)/g,
        upgrade : /upgrade\((.*?)\)/g,
        downgrade : /downgrade\((.*?)\)/g,
        encum : /encum\((.*?)\)/g,
        // Dice counts, e.g. "2b 1y 3blk": up to two digits per color code.
        dice : /(\d{1,2}blk)\b|(\d{1,2}b)\b|(\d{1,2}g)\b|(\d{1,2}y)\b|(\d{1,2}p)\b|(\d{1,2}r)\b|(\d{1,2}w)\b/g, ///blk$|b$|g$|y$|p$|r$|w$/
        crit : /crit\((.*?)\)/,
        critShip : /critship\((.*?)\)/,
    }
}
// Ensures a "-DicePool" character exists to hold the shared GM dice pool,
// creating it (with a starting pcgm attribute of 3) on first run.
eote.createGMDicePool = function() {
    //create character -DicePool
    if (findObjs({ _type: "character", name: "-DicePool" }).length == 0){
        createObj("character", {
            name: "-DicePool",
            bio: "GM Dice Pool"
        });
        // NOTE(review): assigned without `var`, so Char_dicePoolObject becomes
        // an implicit global — confirm nothing outside this function reads it
        // before scoping it locally.
        Char_dicePoolObject = findObjs({ _type: "character", name: "-DicePool" });
        createObj("attribute", {
            name: "pcgm",
            current: 3,
            characterid: Char_dicePoolObject[0].id
        });
    };
}
// Wrapper around Roll20's createObj that patches the created object's
// firebase path so later .set() calls don't fail ("Firebase.set failed").
eote.createObj = function() {//Create Object Fix - Firebase.set failed
    var obj = createObj.apply(this, arguments);
    var id = obj.id;
    var characterID = obj.get('characterid');
    var type = obj.get('type');
    if (obj && !obj.fbpath && obj.changed) {
        // Strip the first four path segments to recover the canonical path.
        obj.fbpath = obj.changed._fbpath.replace(/([^\/]*\/){4}/, "/");
    } else if (obj && !obj.changed && type == 'attribute') { //fix for dynamic attribute after in character created in game
        // Rebuild the attribute path by hand when no change record exists.
        obj.fbpath = '/char-attribs/char/'+ characterID +'/'+ id;
        // /char-attribs/char/characterID/attributeID
    }
    return obj;
}
// Applies the default attributes to one character, or — when called with no
// argument — to every character in the campaign.
eote.setCharacterDefaults = function(characterObj) {
    // Normalize to an array of characters to process.
    var characterList = characterObj ? [characterObj] : findObjs({ _type: "character"});
    _.each(characterList, function(character){
        // Point the default characterID attribute at this character's id.
        _.findWhere(eote.defaults.character.attributes, {'name':'characterID'}).current = character.id;
        // Create or update the default attributes on the sheet.
        eote.updateAddAttribute(character, eote.defaults.character.attributes);
    });
}
// Updates existing attributes (when marked update:true) or creates missing
// ones, for each given character.
// charactersObj = object or array of objects; updateAddAttributesObj = object or array of objects.
eote.updateAddAttribute = function(charactersObj, updateAddAttributesObj ) {
    // Normalize both arguments to arrays.
    if (!_.isArray(charactersObj)) {
        charactersObj = [charactersObj];
    }
    if (!_.isArray(updateAddAttributesObj)) {
        updateAddAttributesObj = [updateAddAttributesObj];
    }
    _.each(charactersObj, function(characterObj){//loop characters
        // Characters may be plain stubs ({name, id}) or Roll20 objects.
        var characterName = '';
        if(characterObj.name) {
            characterName = characterObj.name;
        } else {
            characterName = characterObj.get('name');
        }
        //find attributes via character ID
        var characterAttributesObj = findObjs({ _type: "attribute", characterid: characterObj.id});
        log('//------------------------------->'+ characterName);
        _.each(updateAddAttributesObj, function(updateAddAttrObj){ //loop attributes to update / add
            // Bug fix: `attr` was previously assigned without `var`, leaking
            // an implicit global that overlapping calls could clobber.
            var attr = _.find(characterAttributesObj, function(a) {
                return (a.get('name') === updateAddAttrObj.name);
            });
            if (attr) {
                if (updateAddAttrObj.update) {
                    log('Update Attr: '+ updateAddAttrObj.name);
                    attr.set({current: updateAddAttrObj.current});
                    attr.set({max: updateAddAttrObj.max});
                }
            } else {
                log('Add Attr: '+ updateAddAttrObj.name);
                eote.createObj('attribute', {
                    characterid: characterObj.id,
                    name: updateAddAttrObj.name,
                    current: updateAddAttrObj.current,
                    max: updateAddAttrObj.max
                });
            }
        });
    });
}
/* DICE PROCESS
 *
 * Matches the different regex commands and runs that dice processing step
 * The order of steps should not be changed or dice could be incorrectly rolled.
 * example: All dice need to be 'upgraded' before they can be 'downgraded'
 * ---------------------------------------------------------------- */
// Per-roll state container: identity of the roller, aggregate symbol totals,
// rendered output (graphic and text) per die type, and the dice-pool counts.
eote.defaults.dice = function () {
    // Who is rolling and under what label (skill name).
    this.vars = {
        characterName : '',
        characterID : '',
        playerName : '',
        playerID : '',
        label : ''
    }
    // Net symbol totals after all dice are rolled and cancelled.
    this.totals = {
        success : 0,
        failure : 0,
        advantage : 0,
        threat : 0,
        triumph : 0,
        despair : 0,
        light : 0,
        dark : 0
    }
    // Accumulated per-die-type image markup for chat output.
    this.graphicsLog = {
        Boost: '',
        Ability : '',
        Proficiency : '',
        SetBack : '',
        Difficulty : '',
        Challenge : '',
        Force : ''
    }
    // Accumulated per-die-type text for chat output.
    this.textLog = {
        Boost: '',
        Ability : '',
        Proficiency : '',
        SetBack : '',
        Difficulty : '',
        Challenge : '',
        Force : ''
    }
    // Number of dice of each type in the pool to roll.
    this.count = {
        boost: 0,
        ability: 0,
        proficiency: 0,
        setback: 0,
        difficulty: 0,
        challenge: 0,
        force: 0
    }
}
eote.process = {}

// Main chat-command dispatcher for "!eed ...". Parses the command against
// eote.defaults.regex in a deliberate order: config commands short-circuit,
// roll metadata is collected, the dice pool is built and modified
// (upgrade before downgrade), dice are rolled, then custom roll commands
// may consume the result before the default chat output.
eote.process.setup = function(cmd, playerName, playerID){
    if (!cmd.match(eote.defaults.regex.cmd)) { //check for api cmd !eed
        return false;
    }
    //log(cmd);
    /* Fresh roll state for this command.
     * ------------------------------------------------------------- */
    var diceObj = new eote.defaults.dice();
    diceObj.vars.playerName = playerName;
    diceObj.vars.playerID = playerID;
    /* Dice config
     * Description: Change dice roller default config; each returns early
     * so no roll happens on a config command.
     * --------------------------------------------------------------*/
    var logMatch = cmd.match(eote.defaults.regex.log);
    if (logMatch) {
        eote.process.log(logMatch);
        return false; //Stop current roll
    }
    var graphicsMatch = cmd.match(eote.defaults.regex.graphics);
    if (graphicsMatch) {
        eote.process.graphics(graphicsMatch);
        return false; //Stop current roll
    }
    var testMatch = cmd.match(eote.defaults.regex.test);
    if (testMatch) {
        eote.process.test(testMatch);
        return false; //Stop current roll and run test
    }
    /* Roll information
     * Description: Set default dice roll information Character Name and Skill Label
     * --------------------------------------------------------------*/
    var characterIDMatch = cmd.match(eote.defaults.regex.characterID);
    if (characterIDMatch) {
        diceObj = eote.process.characterID(characterIDMatch, diceObj);
    }
    var labelMatch = cmd.match(eote.defaults.regex.label);
    if (labelMatch) {
        diceObj = eote.process.label(labelMatch, diceObj);
    }
    /* Dice rolls
     * Description: Create dice pool before running any custom roll
     * script commands that may need dice evaluated.
     * --------------------------------------------------------------*/
    var encumMatch = cmd.match(eote.defaults.regex.encum);
    if (encumMatch) {
        diceObj = eote.process.encum(encumMatch, diceObj);
    }
    var skillMatch = cmd.match(eote.defaults.regex.skill);
    if (skillMatch) {
        diceObj = eote.process.skill(skillMatch, diceObj);
    }
    var opposedMatch = cmd.match(eote.defaults.regex.opposed);
    if (opposedMatch) {
        diceObj = eote.process.opposed(opposedMatch, diceObj);
    }
    var diceMatch = cmd.match(eote.defaults.regex.dice);
    if (diceMatch) {
        diceObj = eote.process.setDice(diceMatch, diceObj);
    }
    // Order matters: upgrades must be applied before downgrades.
    var upgradeMatch = cmd.match(eote.defaults.regex.upgrade);
    if (upgradeMatch) {
        diceObj = eote.process.upgrade(upgradeMatch, diceObj);
    }
    var downgradeMatch = cmd.match(eote.defaults.regex.downgrade);
    if (downgradeMatch) {
        diceObj = eote.process.downgrade(downgradeMatch, diceObj);
    }
    /* Roll dice and update success / fail
     * ------------------------------------------------------------- */
    diceObj = eote.process.rollDice(diceObj);
    /* Custom rolls
     * Description: Custom dice components have their own message/results and
     * often return false so the default output below does not fire.
     * --------------------------------------------------------------*/
    var resetdiceMatch = cmd.match(eote.defaults.regex.resetdice);
    if (resetdiceMatch) {
        eote.process.resetdice(resetdiceMatch, diceObj);
        return false;
    }
    var initiativeMatch = cmd.match(eote.defaults.regex.initiative);
    if (initiativeMatch) {
        eote.process.initiative(initiativeMatch, diceObj);
        //return false;
    }
    var critMatch = cmd.match(eote.defaults.regex.crit);
    if (critMatch) {
        eote.process.crit(critMatch, diceObj);
        return false;
    }
    var critShipMatch = cmd.match(eote.defaults.regex.critShip);
    if (critShipMatch) {
        eote.process.critShip(critShipMatch, diceObj);
        return false;
    }
    /* Display dice output in chat window
     * ------------------------------------------------------------- */
    eote.process.diceOutput(diceObj, playerName, playerID);
}
/* DICE PROCESS FUNCTION
*
* ---------------------------------------------------------------- */
eote.process.log = function(cmd){
    /* Log
     * default: 'on' and 'single'
     * Description: Sets the visual output in the chat window for the dice rolls
     * Command: !eed log on|off|multi|single
     * ---------------------------------------------------------------- */
    switch(cmd[1]) {
        case "on": //output rolled dice to chat window
            eote.defaults.globalVars.diceLogChat = true;
            sendChat("Dice System", 'Output rolled dice to chat window "On"');
            break;
        case "off": //only output results to chat window
            eote.defaults.globalVars.diceLogChat = false;
            sendChat("Dice System", 'Output rolled dice to chat window "Off"');
            break;
        case "multi": //each die set on its own line
            eote.defaults.globalVars.diceLogRolledOnOneLine = false;
            // Bug fix: message previously reported multiple-line output as
            // "Off" while enabling it (and misspelled "Mulitple"/"Sytem").
            sendChat("Dice System", 'Multiple line output "On"');
            break;
        case "single": //all dice on a single line
            eote.defaults.globalVars.diceLogRolledOnOneLine = true;
            // Bug fix: counterpart of the above — single-line mode turns
            // multiple-line output off.
            sendChat("Dice System", 'Multiple line output "Off"');
            break;
    }
}
eote.process.graphics = function(cmd){
    /* Graphics
     * default: 'on' and 'm'
     * Description: Sets chat window dice output as graphic, small, medium, or large if "on" or as text if "off"
     * Command: !eed graphics on|off|s|m|l
     * NOTE(review): the "<NAME>" sender strings below look like a redaction
     * artifact (other cases use "Dice Sytem") — confirm the intended sender
     * name before changing them.
     * ---------------------------------------------------------------- */
    //log(cmd[1]);
    switch(cmd[1]) {
        case "on": // show dice as images
            eote.defaults.globalVars.diceGraphicsChat = true;
            sendChat("<NAME>", 'Chat graphics "On"');
            break;
        case "off": // show dice as text
            eote.defaults.globalVars.diceGraphicsChat = false;
            sendChat("<NAME>", 'Chat graphics "Off"');
            break;
        case "s": // small dice images
            eote.defaults.globalVars.diceGraphicsChatSize = eote.defaults.graphics.SIZE.SMALL;
            sendChat("<NAME>", 'Chat graphics size "Small"');
            break;
        case "m": // medium dice images (default)
            eote.defaults.globalVars.diceGraphicsChatSize = eote.defaults.graphics.SIZE.MEDIUM;
            sendChat("<NAME>", 'Chat graphics size "Medium"');
            break;
        case "l": // large dice images
            eote.defaults.globalVars.diceGraphicsChatSize = eote.defaults.graphics.SIZE.LARGE;
            sendChat("Dice Sytem", 'Chat graphics size "Large"');
            break;
    }
}
// Rolls one die of every type with full chat output enabled, so each face
// art/text can be verified, then restores the previous output settings.
eote.process.test = function(cmd){
    //Set test vars to true
    eote.defaults.globalVars.diceTestEnabled = true;
    // Bug fix: these temporaries were assigned without `var`, leaking
    // implicit globals into the sandbox scope.
    var tmpLogChat = eote.defaults.globalVars.diceLogChat;
    var tmpGraphicsChat = eote.defaults.globalVars.diceGraphicsChat;
    eote.defaults.globalVars.diceLogChat = true;
    eote.defaults.globalVars.diceGraphicsChat = true;
    //Roll one of each die type
    eote.process.setup('!eed 1b 1g 1y 1blk 1p 1r 1w', 'GM', 'Dice Test');
    //After test reset vars back
    eote.defaults.globalVars.diceTestEnabled = false;
    eote.defaults.globalVars.diceLogChat = tmpLogChat;
    eote.defaults.globalVars.diceGraphicsChat = tmpGraphicsChat;
}
eote.process.characterID = function(cmd, diceObj){
    /* CharacterId
     * default: null
     * Description: resolves the character's name from a character ID
     * Command: !eed characterID(##########)
     * ---------------------------------------------------------------- */
    var charId = cmd[1];
    if (charId) {
        diceObj.vars.characterID = charId;
        diceObj.vars.characterName = getObj("character", charId).get('name');
    }
    return diceObj;
}
eote.process.label = function(cmd, diceObj){
    /* Label
     * default: null
     * Description: stores the skill name shown with the roll
     * Command: !eed label(Name of Skill)
     * ---------------------------------------------------------------- */
    var skillLabel = cmd[1];
    if (skillLabel) {
        diceObj.vars.label = skillLabel;
    }
    return diceObj;
}
// Resets all pool attributes (dice counts plus upgrade/downgrade counters)
// to zero on the rolling character. "resetgmdice" targets the GM variants,
// which share the same base names with a "gm" suffix.
// Refactor: the player and GM attribute lists were two near-identical
// ~70-line literals differing only by the "gm" suffix; they are now
// generated from one shared name list (same names, order, and values).
eote.process.resetdice = function(cmd, diceObj){
    var characterObj = [{name: diceObj.vars.characterName, id: diceObj.vars.characterID}];
    //log(cmd);
    // Base attribute names, in the original reset order.
    var baseNames = [
        "b", "g", "y", "blk", "r", "p", "w",
        "upgradeAbility", "downgradeProficiency",
        "upgradeDifficulty", "downgradeChallenge"
    ];
    // GM pool attributes carry a "gm" suffix (e.g. "bgm", "upgradeAbilitygm").
    var suffix = (cmd[1] == 'resetgmdice') ? 'gm' : '';
    var resetdice = _.map(baseNames, function(baseName){
        return {
            name : baseName + suffix,
            current : 0,
            max : "",
            update : true
        };
    });
    eote.updateAddAttribute(characterObj, resetdice);
}
// Pushes an initiative entry ("successes:advantages") onto the Roll20 turn
// order and re-sorts it: successes first, then advantages, PCs before NPCs
// on a full tie.
eote.process.initiative = function(cmd, diceObj){
    /* initiative
     * default: false
     * Description: Set NPC/PC initiative true
     * Command: !eed npcinit or pcinit
     * ---------------------------------------------------------------- */
    var type = '';
    var NumSuccess = diceObj.totals.success;
    var NumAdvantage = diceObj.totals.advantage;
    var turnorder;
    //log(diceObj);
    if(Campaign().get("turnorder") == "") turnorder = []; //NOTE: We check to make sure that the turnorder isn't just an empty string first. If it is treat it like an empty array.
    else turnorder = JSON.parse(Campaign().get("turnorder"));
    if (cmd[0] == 'pcinit') {
        type = 'PC';
    }
    if (cmd[0] == 'npcinit') {
        type = 'NPC';
    }
    //Add a new custom entry to the end of the turn order.
    turnorder.push({
        id: "-1",
        pr: NumSuccess + ":" + NumAdvantage,
        custom: type
    });
    // pr is a "success:advantage" string; subtraction coerces each half back
    // to a number, so comparisons below are numeric despite the split().
    turnorder.sort(function(x,y) {
        var a = x.pr.split(":");
        var b = y.pr.split(":");
        if (b[0] - a[0] != 0) {//First rank on successes (descending)
            return b[0] - a[0];
        } else if (b[1] - a[1] != 0) {//Then rank on Advantage (descending)
            return b[1] - a[1];
        } else { //If they are still tied, PC goes first
            if (x.custom == y.custom) {
                return 0;
            } else if (x.custom =="NPC") {
                return 1;
            } else {
                return -1;
            }
        }
    });
    Campaign().set("turnorder", JSON.stringify(turnorder));
    //sendChat(diceObj.vars.characterName, 'Rolls initiative');
}
eote.process.crit = function(cmd, diceObj){
/* Crit
* default:
* Description: Rolls critical injury table
* Command: !eed crit(roll) crit(roll|#) crit(heal|#)
* ---------------------------------------------------------------- */
//log(cmd);
var characterObj = [{name: diceObj.vars.characterName, id: diceObj.vars.characterID}];
var critTable = [
{
percent : '1 to 5',
severity : 1,
name : 'Minor Nick',
Result : 'Suffer 1 strain.',
effect : '',
},
{
percent : '6 to 10',
severity : 1,
name : '<NAME>',
Result : 'May only act during last allied Initiative slot on next turn.',
effect : '',
},
{
percent : '11 to 15',
severity : 1,
name : '<NAME>',
Result : 'May only act during last hero Initiative slot on next turn.',
effect : '',
},
{
percent : '16 to 20',
severity : 1,
name : 'Distracted',
Result : 'Cannot perform free maneuver on next turn.',
effect : '',
},
{
percent : '21 to 25',
severity : 1,
name : 'Off-Balance',
Result : 'Add 1 Setback die to next skill check.',
effect : '',
},
{
percent : '26 to 30',
severity : 1,
name : '<NAME>',
Result : 'Flip one light destiny to dark.',
effect : '',
},
{
percent : '31 to 35',
severity : 1,
name : 'Stunned',
Result : 'Staggered, cannot perform action on next turn.',
effect : '',
},
{
percent : '36 to 40',
severity : 1,
name : 'Stinger',
Result : 'Increase difficulty of next check by 1 Difficulty die.',
effect : '',
},
//----------------------------- Severity 2
{
percent : '41 to 45',
severity : 2,
name : '<NAME>',
Result : 'Knocked prone and suffer 1 strain.',
effect : '',
},
{
percent : '46 to 50',
severity : 2,
name : '<NAME>',
Result : 'Increase difficulty of all Intellect and Cunning checks by 1 Difficulty die until end of encounter.',
effect : '',
},
{
percent : '51 to 55',
severity : 2,
name : '<NAME>',
Result : 'Increase difficulty of all Presence and Willpower checks by 1 Difficulty die until end of encounter.',
effect : '',
},
{
percent : '56 to 60',
severity : 2,
name : '<NAME>',
Result : 'Increase difficulty of all Brawn and Agility checks by 1 Difficulty die until end of encounter.',
effect : '',
},
{
percent : '61 to 65',
severity : 2,
name : '<NAME>',
Result : 'Add 1 Setback die to all skill checks until end of encounter.',
effect : '',
},
{
percent : '66 to 70',
severity : 2,
name : '<NAME>',
Result : 'Remove all Boost dice from all skill checks until end of encounter.',
effect : '',
},
{
percent : '71 to 75',
severity : 2,
name : 'Hamstrung',
Result : 'Lose free maneuver until end of encounter.',
effect : '',
},
{
percent : '76 to 80',
severity : 2,
name : 'Staggered',
Result : 'Attacker may immediately attempt another free attack against you using same dice pool as original attack.',
effect : '',
},
{
percent : '81 to 85',
severity : 2,
name : 'Winded',
Result : 'Cannot voluntarily suffer strain to activate abilities or gain additional maneuvers until end of encounter.',
effect : '',
},
{
percent : '86 to 90',
severity : 2,
name : 'Compromised',
Result : 'Increase difficulty of all skill checks by 1 Difficulty die until end of encounter.',
effect : '',
},
//---------------------------------------- Severity 3
{
percent : '91 to 95',
severity : 3,
name : 'At the Brink',
Result : 'Suffer 1 strain each time you perform an action.',
effect : '',
},
{
percent : '96 to 100',
severity : 3,
name : 'Crippled',
Result : 'Limb crippled until healed or replaced. Increase difficulty of all checks that use that limb by 1 Difficulty die.',
effect : '',
},
{
percent : '101 to 105',
severity : 3,
name : 'Maimed',
Result : 'Limb permanently lost. Unless you have a cybernetic replacement, cannot perform actions that use that limb. Add 1 Setback to all other actions.',
effect : '',
},
{
percent : '106 to 110',
severity : 3,
name : '<NAME>',
Result : 'Roll 1d10 to determine one wounded characteristic -- roll results(1-3 = Brawn, 4-6 = Agility, 7 = Intellect, 8 = Cunning, 9 = Presence, 10 = Willpower. Until Healed, treat characteristic as one point lower.',
effect : '',
},
{
percent : '111 to 115',
severity : 3,
name : '<NAME>',
Result : 'Until healed, may not perform more than one maneuver each turn.',
effect : '',
},
{
percent : '116 to 120',
severity : 3,
name : 'Blinded',
Result : 'Can no longer see. Upgrade the difficulty of Perception and Vigilance checks three times, and all other checks twice.',
effect : '',
},
{
percent : '121 to 125',
severity : 3,
name : 'Knocked Senseless',
Result : 'You can no longer upgrade dice for checks.',
effect : '',
},
//---------------------------------------- Severity 4
{
percent : '126 to 130',
severity : 4,
name : '<NAME>',
Result : 'Roll 1d10 to determine one wounded characteristic -- roll results(1-3 = Brawn, 4-6 = Agility, 7 = Intellect, 8 = Cunning, 9 = Presence, 10 = Willpower. Characteristic is permanently one point lower.',
effect : '',
},
{
percent : '131 to 140',
severity : 4,
name : 'Bleeding Out',
Result : 'Suffer 1 wound and 1 strain every round at the beginning of turn. For every 5 wounds suffered beyond wound threshold, suffer one additional Critical Injury (ignore the details for any result below this result).',
effect : '',
},
{
percent : '141 to 150',
severity : 4,
name : '<NAME>',
Result : 'Die after the last Initiative slot during the next round.',
effect : '',
},
{
percent : '151',
severity : 4,
name : 'Dead',
Result : 'Complete, absolute death.',
effect : '',
}
];
var critRoll = function(addCritNum) {
    // Rolls (or directly applies) a Critical Injury for the current character.
    // addCritNum: optional — when supplied (crit(add|#)) it is used as the final
    // table result instead of rolling d100 + 10 per existing critical.
    var openSlot = false;   // lowest free critOn1..critOn15 slot, or false if all full
    var diceRoll = '';
    var critMod = '';
    var rollTotal = '';
    var totalcrits = 0;     // number of already-occupied critical slots
    // Scan the 15 critical slots, counting occupied ones. Iterating downward
    // means openSlot ends up as the LOWEST free slot index.
    // ('var' keeps i function-local; the original leaked it to global scope.)
    for (var i = 15; i >= 1; i--) {
        var slot = getAttrByName(diceObj.vars.characterID, 'critOn'+i);
        if (slot == '0' || slot == '') {
            openSlot = i;
        } else {
            totalcrits = totalcrits + 1;
        }
    }
    if (!openSlot) {
        // All 15 slots full — by the rules the character should already be dead.
        sendChat("Alert", "Why are you not dead!");
        return false;
    }
    if (!addCritNum) {
        // Standard roll: d100 plus +10 per existing critical injury.
        diceRoll = randomInteger(100);
        critMod = (totalcrits * 10);
        rollTotal = diceRoll + critMod;
    } else {
        // Explicit result supplied by the user; radix 10 guards against odd input.
        rollTotal = parseInt(addCritNum, 10);
    }
    // Locate the matching row of the critical injury table and write it into
    // the open slot's attributes.
    for (var key in critTable) {
        var percent = critTable[key].percent.split(' to ');
        var low = parseInt(percent[0], 10);
        // Open-ended rows (e.g. '151') have no upper bound — treat as effectively infinite.
        var high = percent[1] ? parseInt(percent[1], 10) : 1000;
        if ((rollTotal >= low) && (rollTotal <= high)) {
            // 'var' keeps critAttrs out of the global scope (original leaked it).
            var critAttrs = [
                {
                    name : 'critName'+ openSlot,
                    current : critTable[key].name,
                    max : '',
                    update : true
                },
                {
                    name : 'critSeverity'+ openSlot,
                    current : critTable[key].severity,
                    max : '',
                    update : true
                },
                {
                    name : 'critRange'+ openSlot,
                    current : critTable[key].percent,
                    max : '',
                    update : true
                },
                {
                    name : 'critSummary'+ openSlot,
                    current : critTable[key].Result,
                    max : '',
                    update : true
                },
                {
                    name : 'critOn'+ openSlot,
                    current : openSlot,
                    max : '',
                    update : true
                }
            ];
            eote.updateAddAttribute(characterObj, critAttrs);
            // Announce the result in chat.
            var chat = '/direct <br><b>Rolls Critical Injury</b><br>';
            chat = chat + '<img src="http://i.imgur.com/z51hRwd.png" /><br/>';
            chat = chat + 'Current Criticals: (' + totalcrits + ' x 10)<br>';
            chat = chat + 'Dice Roll: ' + diceRoll + '<br>';
            chat = chat + 'Total: ' + rollTotal + '<br>';
            chat = chat + '<br>';
            chat = chat + '<b>' + critTable[key].name + '</b><br>';
            chat = chat + critTable[key].Result + '<br>';
            sendChat(diceObj.vars.characterName, chat);
            break; // ranges don't overlap — stop once the matching row is found
        }
    }
}
var critHeal = function(critID) {
    // Clears every critical-injury attribute for slot number critID and marks
    // the slot as open again (critOn<N> = 0).
    // 'var' keeps critAttrs out of the global scope (original leaked it).
    var critAttrs = [
        {
            name : 'critName'+ critID,
            current : '',
            max : '',
            update : true
        },
        {
            name : 'critSeverity'+ critID,
            current : '',
            max : '',
            update : true
        },
        {
            name : 'critRange'+ critID,
            current : '',
            max : '',
            update : true
        },
        {
            name : 'critSummary'+ critID,
            current : '',
            max : '',
            update : true
        },
        {
            name : 'critOn'+ critID,
            current : 0,
            max : '',
            update : true
        }
    ];
    eote.updateAddAttribute(characterObj, critAttrs);
}
var critArray = cmd[1].split('|');
var prop1 = critArray[0];
var prop2 = critArray[1] ? critArray[1] : null;
if (prop1 == 'heal') {
critHeal(prop2);
} else if (prop1 == 'add') {
critRoll(prop2);
} else { // crit(roll)
critRoll();
}
}
eote.process.critShip = function(cmd, diceObj){
/* CritShip
* default:
* Description: Rolls vehicle critical table, Both crit() and critShip() function the same
* Command: !eed critShip(roll) critShip(roll|#) critShip(heal|#)
* ---------------------------------------------------------------- */
// Owning character, wrapped in the array shape eote.updateAddAttribute expects.
var characterObj = [{name: diceObj.vars.characterName, id: diceObj.vars.characterID}];
// Vehicle critical hit table. Each row is matched against the final roll via
// its 'percent' range; 'severity' 1-4 indicates repair difficulty. An entry
// without an upper bound (e.g. '154+') is treated as open-ended by critRoll.
var critTable = [
{
percent : '1 to 9',
severity : 1,
name : '<NAME>',
Result : 'Ship or vehicle suffers 1 system strain.',
effect : '',
},
{
percent : '10 to 18',
severity : 1,
name : 'Jostled',
Result : 'All crew members suffer 1 strain.',
effect : '',
},
{
percent : '19 to 27',
severity : 1,
name : 'Losing Power to Shields',
Result : 'Decrease defense in affected defense zone by 1 until repaired. If ship or vehicle has no defense, suffer 1 system strain.',
effect : '',
},
{
percent : '28 to 36',
severity : 1,
name : 'Knocked Off Course',
Result : 'On next turn, pilot cannot execute any maneuvers. Instead, must make a Piloting check to regain bearings and resume course. Difficulty depends on current speed.',
effect : '',
},
{
percent : '37 to 45',
severity : 1,
name : 'Tailspin',
Result : 'All firing from ship or vehicle suffers 2 setback dice until end of pilot\'s next turn.',
effect : '',
},
{
percent : '46 to 54',
severity : 1,
name : 'Component Hit',
Result : 'One component of the attacker\'s choice is knocked offline, and is rendered inoperable until the end of the following round. See page 245 CRB for Small/Large Vehicle and Ship Component tables. ',
effect : '',
},
// --------------- severity : 2
{
percent : '55 to 63',
severity : 2,
name : 'Shields Failing',
Result : 'Decrease defense in all defense zones by 1 until repaired. If ship or vehicle has no defense, suffer 2 system strain.',
effect : '',
},
{
percent : '64 to 72',
severity : 2,
name : 'Hyperdrive or Navicomputer Failure',
Result : 'Cannot make any jump to hyperspace until repaired. If ship or vehicle has no hyperdrive, navigation systems fail leaving it unable to tell where it is or is going.',
effect : '',
},
{
percent : '73 to 81',
severity : 2,
name : 'Power Fluctuations',
Result : 'Pilot cannot voluntarily inflict system strain on the ship until repaired.',
effect : '',
},
// --------------- severity : 3
{
percent : '82 to 90',
severity : 3,
name : 'Shields Down',
Result : 'Decrease defense in affected defense zone to 0 and all other defense zones by 1 point until repaired. If ship or vehicle has no defense, suffer 4 system strain.',
effect : '',
},
{
percent : '91 to 99',
severity : 3,
name : 'Engine Damaged',
Result : 'Ship or vehicle\'s maximum speed reduced by 1, to a minimum of 1, until repaired.',
effect : '',
},
{
percent : '100 to 108',
severity : 3,
name : 'Shield Overload',
Result : 'Decrease defense in all defense zones to 0 until repaired. In addition, suffer 2 system strain. Cannot be repaired until end of encounter. If ship or vehicle has no defense, reduce armor by 1 until repaired.',
effect : '',
},
{
percent : '109 to 117',
severity : 3,
name : 'Engines Down',
Result : 'Ship or vehicle\'s maximum speed reduced to 0. In addition, ship or vehicle cannot execute maneuvers until repaired. Ship continues on course at current speed and cannot be stopped or course changed until repaired.',
effect : '',
},
{
percent : '118 to 126',
severity : 3,
name : 'Major System Failure',
Result : 'One component of the attacker\'s choice is heavily damages, and is inoperable until the critical hit is repaired. See page 245 CRB for Small/Large Vehicle and Ship Component tables. ',
effect : '',
},
// --------------- severity : 4
{
percent : '127 to 133',
severity : 4,
name : 'Major Hull Breach',
Result : 'Ships and vehicles of silhouette 4 and smaller depressurize in a number of rounds equal to silhouette. Ships of silhouette 5 and larger don\'t completely depressurize, but parts do (specifics at GM discretion). Ships and vehicles operating in atmosphere instead suffer a Destabilized Critical.',
effect : '',
},
{
percent : '134 to 138',
severity : 4,
name : 'Destabilised',
Result : 'Reduce ship or vehicle\'s hull integrity threshold and system strain threshold to half original values until repaired.',
effect : '',
},
{
percent : '139 to 144',
severity : 4,
name : 'Fire!',
Result : 'Fire rages through ship or vehicle and it immediately takes 2 system strain. Fire can be extinguished with appropriate skill, Vigilance or Cool checks at GM\'s discretion. Takes one round per two silhouette to put out.',
effect : '',
},
{
percent : '145 to 153',
severity : 4,
name : 'Breaking Up',
Result : 'At the end of next round, ship is completely destroyed. Anyone aboard has one round to reach escape pod or bail out before they are lost.',
effect : '',
},
{
percent : '154+',
severity : 4,
name : 'Vaporized',
Result : 'The ship or Vehicle is completely destroyed.',
effect : '',
}
];
// Rolls (or directly applies, when addCritNum is given) a vehicle critical,
// writing the matched table row into the lowest open critShip* attribute slot.
var critRoll = function(addCritNum) {
var openSlot = false;
var diceRoll = '';
var critMod = '';
var rollTotal = '';
var totalcrits = 0;
//check open critical spot
// Scan slots 15..1: count occupied slots, and (because we iterate downward)
// leave openSlot pointing at the LOWEST free slot.
// NOTE(review): 'i' is assigned without 'var' and leaks to global scope.
for (i = 15; i >= 1; i--) {
var slot = getAttrByName(diceObj.vars.characterID, 'critShipOn'+i);
if (slot == '0' || slot == '') {
openSlot = i;
} else {
totalcrits = totalcrits + 1;
}
}
if (!openSlot) {
// All 15 slots occupied — nothing left to record.
sendChat("Alert", "Why are you not dead!");
return false;
}
//roll random
// Standard roll is d100 + 10 per existing critical; otherwise the supplied
// number is used directly as the table result.
if (!addCritNum) {
diceRoll = randomInteger(100);
critMod = (totalcrits * 10);
rollTotal = diceRoll + critMod;
} else {
rollTotal = parseInt(addCritNum);
}
//find crit in crital table
for (var key in critTable) {
var percent = critTable[key].percent.split(' to ');
var low = parseInt(percent[0]);
// Rows without an upper bound (e.g. '154+') match anything up to 1000.
var high = percent[1] ? parseInt(percent[1]) : 1000;
if ((rollTotal >= low) && (rollTotal <= high)) {
// NOTE(review): 'critAttrs' is assigned without 'var' and leaks to global scope.
critAttrs = [
{
name : 'critShipName'+ openSlot,
current : critTable[key].name,
max : '',
update : true
},
{
name : 'critShipSeverity'+ openSlot,
current : critTable[key].severity,
max : '',
update : true
},
{
name : 'critShipRange'+ openSlot,
current : critTable[key].percent,
max : '',
update : true
},
{
name : 'critShipSummary'+ openSlot,
current : critTable[key].Result,
max : '',
update : true
},
{
name : 'critShipOn'+ openSlot,
current : openSlot,
max : '',
update : true
}
];
eote.updateAddAttribute(characterObj, critAttrs);
// Announce the result in chat.
var chat = '/direct <br><b>Rolls Vehicle Critical</b><br>';
chat = chat + '<img src="http://i.imgur.com/JO3pOr8.png" /><br>';//need new graphic
chat = chat + 'Current Criticals: (' + totalcrits + ' x 10)<br>';
chat = chat + 'Dice Roll: ' + diceRoll + '<br>';
chat = chat + 'Total: ' + rollTotal + '<br>';
chat = chat + '<br>';
chat = chat + '<b>' + critTable[key].name + '</b><br>';
chat = chat + critTable[key].Result + '<br>';
sendChat(diceObj.vars.characterName, chat);
}
}
}
// Clears every critShip* attribute for the given slot, marking it open (critShipOn<N> = 0).
var critHeal = function(critID) {
// NOTE(review): 'critAttrs' is assigned without 'var' and leaks to global scope.
critAttrs = [
{
name : 'critShipName'+ critID,
current : '',
max : '',
update : true
},
{
name : 'critShipSeverity'+ critID,
current : '',
max : '',
update : true
},
{
name : 'critShipRange'+ critID,
current : '',
max : '',
update : true
},
{
name : 'critShipSummary'+ critID,
current : '',
max : '',
update : true
},
{
name : 'critShipOn'+ critID,
current : 0,
max : '',
update : true
}
];
eote.updateAddAttribute(characterObj, critAttrs);
}
// Dispatch: critShip(heal|#) heals slot #, critShip(add|#) applies result #,
// anything else performs a normal roll.
var critArray = cmd[1].split('|');
var prop1 = critArray[0];
var prop2 = critArray[1] ? critArray[1] : null;
if (prop1 == 'heal') {
critHeal(prop2);
} else if (prop1 == 'add') {
critRoll(prop2);
} else { // crit(roll)
critRoll();
}
}
eote.process.encum = function(cmd, diceObj){
    /* Encumbrance
     * Description: Adds setback dice derived from the two encumbrance values
     *     in each "(a|b)" segment of the command.
     * Command: !eed encum(encum_current|encum_threshold)
     * NOTE(review): the original header says "if current encum is greater than
     *     threshold, add 1 setback per unit over" — but the code adds
     *     (second - first) setbacks when the SECOND value is larger. Behavior
     *     is preserved exactly here; confirm the intended argument order.
     * ---------------------------------------------------------------- */
    _.each(cmd, function(segment) {
        var parts = segment.match(/\((.*?)\|(.*?)\)/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // malformed segment — ignore it
        }
        var first = eote.process.math(parts[1]);
        var second = eote.process.math(parts[2]);
        if (second > first) {
            diceObj.count.setback = diceObj.count.setback + (second - first);
        }
    });
    return diceObj;
}
eote.process.skill = function(cmd, diceObj){
    /* Skill
     * Description: Converts each "(char|skill)" value pair into the dice pool
     *     for a skill check: the lower of the two values becomes proficiency
     *     (yellow) dice and the difference becomes ability (green) dice.
     * Command: !eed skill(char_value|skill_value)
     * ---------------------------------------------------------------- */
    _.each(cmd, function(entry) {
        var parts = entry.match(/\((.*?)\|(.*?)\)/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // malformed segment — ignore it
        }
        var charValue = eote.process.math(parts[1]);
        var skillValue = eote.process.math(parts[2]);
        var proficiency = (charValue < skillValue ? charValue : skillValue);
        var ability = Math.abs(charValue - skillValue);
        diceObj.count.ability = diceObj.count.ability + ability;
        diceObj.count.proficiency = diceObj.count.proficiency + proficiency;
    });
    return diceObj;
}
eote.process.opposed = function(cmd, diceObj){
    /* Opposed
     * Description: Converts each "(char|skill)" value pair of the opponent
     *     into the opposing dice pool: the lower value becomes challenge (red)
     *     dice and the difference becomes difficulty (purple) dice.
     * Command: !eed opposed(char_value|skill_value)
     * ---------------------------------------------------------------- */
    _.each(cmd, function(entry) {
        var parts = entry.match(/\((.*?)\|(.*?)\)/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // malformed segment — ignore it
        }
        var charValue = eote.process.math(parts[1]);
        var skillValue = eote.process.math(parts[2]);
        var challenge = (charValue < skillValue ? charValue : skillValue);
        var difficulty = Math.abs(charValue - skillValue);
        diceObj.count.difficulty = diceObj.count.difficulty + difficulty;
        diceObj.count.challenge = diceObj.count.challenge + challenge;
    });
    return diceObj;
}
eote.process.setDice = function(cmd, diceObj){
    /* setDice
     * Description: Parses each "<qty><code>" token and adds the quantity to
     *     the matching dice count. Unknown codes are silently ignored.
     * Codes: b=boost, g=ability, y=proficiency, blk=setback, p=difficulty,
     *        r=challenge, w=force.
     * Command: !eed g# y# b# blk# r# p# w# or g#+# or g#-#
     * ---------------------------------------------------------------- */
    var codeToCount = {
        b   : 'boost',
        g   : 'ability',
        y   : 'proficiency',
        blk : 'setback',
        p   : 'difficulty',
        r   : 'challenge',
        w   : 'force'
    };
    _.each(cmd, function(token) {
        var parts = token.match(/(\d{1,2})(\w{1,3})/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // token does not look like "<qty><code>" — skip it
        }
        var qty = eote.process.math(parts[1]);
        if (isNaN(qty)) {
            qty = 0;
        }
        var countKey = codeToCount[parts[2]];
        if (countKey) {
            diceObj.count[countKey] = diceObj.count[countKey] + qty;
        }
    });
    return diceObj;
}
eote.process.upgrade = function(cmd, diceObj){
    /* Upgrade
     * Description: Upgrades ability dice into proficiency dice (or difficulty
     *     into challenge). Each upgrade step promotes one lesser die; steps
     *     beyond the available dice are converted at two steps per extra
     *     greater die, with any odd step left as a lesser die.
     * Command: !eed upgrade(ability|#) or upgrade(difficulty|#)
     * ---------------------------------------------------------------- */
    // Shared upgrade math: returns [remaining lesser dice, dice added to the greater pool].
    var applyUpgrade = function(steps, lesserDice) {
        var promoted = (steps < lesserDice ? steps : lesserDice);
        var leftover = Math.abs(steps - lesserDice);
        if (steps > lesserDice) {
            promoted = promoted + Math.floor(leftover / 2);
            leftover = leftover % 2;
        }
        return [leftover, promoted];
    };
    _.each(cmd, function(dice) {
        var parts = dice.match(/\((.*?)\|(.*?)\)/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // malformed segment — ignore it
        }
        var steps = eote.process.math(parts[2]);
        if (parts[1] === 'ability') {
            var abilityResult = applyUpgrade(steps, diceObj.count.ability);
            diceObj.count.ability = abilityResult[0];
            diceObj.count.proficiency = diceObj.count.proficiency + abilityResult[1];
        } else if (parts[1] === 'difficulty') {
            var difficultyResult = applyUpgrade(steps, diceObj.count.difficulty);
            diceObj.count.difficulty = difficultyResult[0];
            diceObj.count.challenge = diceObj.count.challenge + difficultyResult[1];
        }
    });
    return diceObj;
}
eote.process.downgrade = function(cmd, diceObj){
    /* Downgrade
     * Description: Downgrades proficiency dice into ability dice (or challenge
     *     into difficulty). Each greater die counts as two downgrade steps;
     *     anything beyond the whole pool clamps both counts to zero.
     * Command: !eed downgrade(proficiency|#) or downgrade(challenge|#)
     * ---------------------------------------------------------------- */
    // Shared downgrade math: returns [new lesser dice count, new greater dice count].
    var applyDowngrade = function(steps, lesserDice, greaterDice) {
        var asLesser = greaterDice * 2; // each greater die is worth two lesser dice
        if (steps > (lesserDice + asLesser)) {
            return [0, 0]; // more steps than the whole pool — wipe it out
        }
        if (steps > asLesser) {
            // All greater dice consumed; remaining steps eat into the lesser dice.
            var remaining = Math.abs(steps - asLesser);
            return [Math.abs(remaining - lesserDice), 0];
        }
        // Steps consumed entirely by the greater dice: odd remainder becomes a lesser die.
        return [lesserDice + (asLesser - steps) % 2, Math.floor((asLesser - steps) / 2)];
    };
    _.each(cmd, function(dice) {
        var parts = dice.match(/\((.*?)\|(.*?)\)/);
        if (!parts || !parts[1] || !parts[2]) {
            return; // malformed segment — ignore it
        }
        var steps = eote.process.math(parts[2]);
        if (parts[1] === 'proficiency') {
            var profResult = applyDowngrade(steps, diceObj.count.ability, diceObj.count.proficiency);
            diceObj.count.ability = profResult[0];
            diceObj.count.proficiency = profResult[1];
        } else if (parts[1] === 'challenge') {
            var chalResult = applyDowngrade(steps, diceObj.count.difficulty, diceObj.count.challenge);
            diceObj.count.difficulty = chalResult[0];
            diceObj.count.challenge = chalResult[1];
        }
    });
    return diceObj;
}
eote.process.math = function (expr){
    /* Math
     * Returns: Number
     * Description: Evaluates a simple arithmetic expression string (+ - * /)
     *     and returns the result. Operators are applied strictly
     *     left-to-right — there is NO precedence, so "2+3*4" yields 20, not
     *     14. A sign directly after an operator (or at the start) is folded
     *     into the following number.
     * ---------------------------------------------------------------- */
    var numbers = [""];
    var operators = [];
    var expectingNumber = true; // true at start and right after an operator
    // Tokenize: digits and '.' extend the current number; any other character
    // (once a number has started) is recorded as the next operator.
    for (var c = 0; c < expr.length; c++) {
        var ch = expr.charAt(c);
        var isDigitOrDot = !isNaN(parseInt(ch)) || ch === ".";
        if (!isDigitOrDot && !expectingNumber) {
            operators.push(ch);
            numbers.push("");
            expectingNumber = true;
        } else {
            numbers[numbers.length - 1] += ch;
            expectingNumber = false;
        }
    }
    // Fold the operator list over the numbers, left to right.
    var result = parseFloat(numbers[0]);
    for (var o = 0; o < operators.length; o++) {
        var operand = parseFloat(numbers[o + 1]);
        if (operators[o] === "+") {
            result = result + operand;
        } else if (operators[o] === "-") {
            result = result - operand;
        } else if (operators[o] === "*") {
            result = result * operand;
        } else if (operators[o] === "/") {
            result = result / operand;
        }
    }
    return result;
}
eote.process.addDiceValues = function(diceTotalObj, diceResult){
    /* addDiceValues
     * Description: Adds each symbol tally from a single roll result into the
     *     running totals object.
     * Returns: the (mutated) totals object.
     * ---------------------------------------------------------------- */
    var symbols = ['success', 'failure', 'advantage', 'threat',
                   'triumph', 'despair', 'light', 'dark'];
    for (var s = 0; s < symbols.length; s++) {
        var symbol = symbols[s];
        diceTotalObj[symbol] = diceTotalObj[symbol] + diceResult[symbol];
    }
    return diceTotalObj;
}
eote.process.totalDiceValues = function (diceTotalObj){
    /* totalDiceValues
     * Description: Nets successes against failures and advantages against
     *     threats so that only one side of each opposed pair is non-zero in
     *     the result. Triumph, despair, light and dark pass through unchanged.
     * Returns: a fresh totals object with empty log fields.
     * ---------------------------------------------------------------- */
    var netted = {
        success : 0,
        failure : 0,
        advantage : 0,
        threat : 0,
        triumph : diceTotalObj.triumph,
        despair : diceTotalObj.despair,
        light : diceTotalObj.light,
        dark : diceTotalObj.dark,
        diceGraphicsLog : "",
        diceTextLog : ""
    };
    var successNet = diceTotalObj.success - diceTotalObj.failure;
    if (successNet >= 0) {
        netted.success = successNet;
    } else {
        netted.failure = -successNet;
    }
    var advantageNet = diceTotalObj.advantage - diceTotalObj.threat;
    if (advantageNet >= 0) {
        netted.advantage = advantageNet;
    } else {
        netted.threat = -advantageNet;
    }
    return netted;
}
eote.process.rollDice = function(diceObj) {
    /* rollDice
     * Description: Rolls every die type with a non-zero count, stores the
     *     per-type graphics/text logs on diceObj, accumulates the raw symbol
     *     counts, then nets opposed symbols into final totals.
     * Returns: diceObj with .graphicsLog, .textLog and .totals filled in.
     * ---------------------------------------------------------------- */
    // [count/roll key, log key] pairs; log keys are capitalised to match the
    // existing diceObj.graphicsLog / diceObj.textLog shape. Order matters: it
    // is the order the dice are rolled and logged.
    var dicePool = [
        ['boost',       'Boost'],        // Blue "Boost" die (d6)
        ['ability',     'Ability'],      // Green "Ability" die (d8)
        ['proficiency', 'Proficiency'],  // Yellow "Proficiency" die (d12)
        ['setback',     'SetBack'],      // Black "SetBack" die (d6)
        ['difficulty',  'Difficulty'],   // Purple "Difficulty" die (d8)
        ['challenge',   'Challenge'],    // Red "Challenge" die (d12)
        ['force',       'Force']         // White "Force" die (d12)
    ];
    for (var d = 0; d < dicePool.length; d++) {
        var countKey = dicePool[d][0];
        var logKey = dicePool[d][1];
        if (diceObj.count[countKey] > 0) {
            // 'var' keeps results function-local (original leaked it to global scope).
            var results = eote.roll[countKey](diceObj.count[countKey]);
            diceObj.graphicsLog[logKey] = results.diceGraphicsLog;
            diceObj.textLog[logKey] = results.diceTextLog;
            diceObj.totals = eote.process.addDiceValues(diceObj.totals, results);
        }
    }
    // Net successes/failures and advantages/threats into the final totals.
    diceObj.totals = eote.process.totalDiceValues(diceObj.totals);
    return diceObj;
}
eote.process.diceOutput = function(diceObj, playerName, playerID) {
    /* diceOutput
     * Description: Formats the final dice totals (and, optionally, the raw
     *     per-die logs) and sends them to chat, either as symbol graphics or
     *     as plain text depending on the global settings.
     * Params: diceObj — rolled dice object with .totals and logs filled in;
     *         playerName — fallback chat attribution when no character name;
     *         playerID — currently unused (kept for interface compatibility).
     * ---------------------------------------------------------------- */
    var s1 = '<img src="';
    var s2 = '" title="';
    var s3 = '" height="';
    var s4 = '" width="';
    var s5 = '"/>';
    var chatGlobal = '';
    var diceGraphicsResults = "";
    var diceGraphicsRolled = "";
    var diceTextRolled = "";
    var diceTextResults = "[";
    // [totals key, text label, SYMBOLS graphic key, image title] — order
    // matches the original output order of the chat message.
    var symbolMap = [
        ['success',   ' Success:', 'S',       'Success'],
        ['failure',   ' Fail:',    'F',       'Failure'],
        ['advantage', ' Advant:',  'A',       'Advantage'],
        ['threat',    ' Threat:',  'T',       'Threat'],
        ['triumph',   ' Triumph:', 'TRIUMPH', 'Triumph'],
        ['despair',   ' Despair:', 'DESPAIR', 'Despair'],
        ['light',     ' Light:',   'L',       'Light'],
        ['dark',      ' Dark:',    'D',       'Dark']
    ];
    // Build both the text summary and one image per resulting symbol.
    // ('var' on the loop indices keeps them local; the original leaked 'i'.)
    for (var m = 0; m < symbolMap.length; m++) {
        var total = diceObj.totals[symbolMap[m][0]];
        if (total > 0) {
            diceTextResults = diceTextResults + symbolMap[m][1] + total;
            for (var i = 1; i <= total; i++) {
                diceGraphicsResults = diceGraphicsResults + s1 + eote.defaults.graphics.SYMBOLS[symbolMap[m][2]] + s2 + symbolMap[m][3] + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
            }
        }
    }
    diceTextResults = diceTextResults + "]";
    //------------------------------------>
    // Header line: test banner, skill label, or a bare line break.
    if (eote.defaults.globalVars.diceTestEnabled === true) {
        chatGlobal = "/direct <br>6b 8g 12y 6blk 8p 12r 12w <br>";
    } else if (diceObj.vars.label) {
        chatGlobal = "/direct <br><b>Skill:</b> " + diceObj.vars.label + '<br>';
    } else {
        chatGlobal = "/direct <br>";
    }
    // Who the roll is attributed to in chat.
    // ('var' keeps characterPlayer local; the original leaked it to global scope.)
    var characterPlayer;
    if (eote.defaults.globalVars.diceTestEnabled === true) {
        characterPlayer = 'TEST';
    } else if (diceObj.vars.characterName) {
        characterPlayer = diceObj.vars.characterName;
    } else {
        characterPlayer = playerName;
    }
    //------------------------------------>
    // Optionally log the individual dice that were rolled, either on one line
    // or one chat message per die type, as graphics or text.
    if (eote.defaults.globalVars.diceLogChat === true) {
        if (eote.defaults.globalVars.diceLogRolledOnOneLine === true) {
            diceGraphicsRolled = diceObj.graphicsLog.Boost + diceObj.graphicsLog.Ability + diceObj.graphicsLog.Proficiency + diceObj.graphicsLog.SetBack + diceObj.graphicsLog.Difficulty + diceObj.graphicsLog.Challenge + diceObj.graphicsLog.Force;
            if (diceObj.textLog.Boost !="") diceTextRolled = diceTextRolled + "Boost:"+diceObj.textLog.Boost;
            if (diceObj.textLog.Ability !="") diceTextRolled = diceTextRolled + "Ability:"+diceObj.textLog.Ability;
            if (diceObj.textLog.Proficiency !="") diceTextRolled = diceTextRolled + "Proficiency:"+diceObj.textLog.Proficiency;
            if (diceObj.textLog.SetBack !="") diceTextRolled = diceTextRolled + "SetBack:"+diceObj.textLog.SetBack;
            if (diceObj.textLog.Difficulty !="") diceTextRolled = diceTextRolled + "Difficulty:"+diceObj.textLog.Difficulty;
            if (diceObj.textLog.Challenge !="") diceTextRolled = diceTextRolled + "Challenge:"+diceObj.textLog.Challenge;
            if (diceObj.textLog.Force !="") diceTextRolled = diceTextRolled + "Force:"+diceObj.textLog.Force;
            if (eote.defaults.globalVars.diceGraphicsChat === true) {
                chatGlobal = chatGlobal + '<br>' + diceGraphicsRolled;
            } else {
                sendChat("", diceTextRolled);
            }
        } else {
            if (eote.defaults.globalVars.diceGraphicsChat === true) {
                if (diceObj.vars.label) {
                    sendChat(characterPlayer, "/direct <br><b>Skill:</b> " + diceObj.vars.label + '<br>');
                }
                if (diceObj.graphicsLog.Boost !="") sendChat("", "/direct " + diceObj.graphicsLog.Boost);
                if (diceObj.graphicsLog.Ability !="") sendChat("", "/direct " + diceObj.graphicsLog.Ability);
                if (diceObj.graphicsLog.Proficiency !="") sendChat("", "/direct " + diceObj.graphicsLog.Proficiency);
                if (diceObj.graphicsLog.SetBack !="") sendChat("", "/direct " + diceObj.graphicsLog.SetBack);
                if (diceObj.graphicsLog.Difficulty !="") sendChat("", "/direct " + diceObj.graphicsLog.Difficulty);
                if (diceObj.graphicsLog.Challenge !="") sendChat("", "/direct " + diceObj.graphicsLog.Challenge);
                if (diceObj.graphicsLog.Force !="") sendChat("", "/direct " + diceObj.graphicsLog.Force);
            } else {
                if (diceObj.vars.label) {
                    sendChat(characterPlayer, "/direct <br><b>Skill:</b> " + diceObj.vars.label + '<br>');
                }
                if (diceObj.textLog.Boost !="") sendChat("", "Boost:"+diceObj.textLog.Boost);
                if (diceObj.textLog.Ability !="") sendChat("", "Ability:"+diceObj.textLog.Ability);
                if (diceObj.textLog.Proficiency !="") sendChat("", "Proficiency:"+diceObj.textLog.Proficiency);
                if (diceObj.textLog.SetBack !="") sendChat("", "SetBack:"+diceObj.textLog.SetBack);
                if (diceObj.textLog.Difficulty !="") sendChat("", "Difficulty:"+diceObj.textLog.Difficulty);
                if (diceObj.textLog.Challenge !="") sendChat("", "Challenge:"+diceObj.textLog.Challenge);
                if (diceObj.textLog.Force !="") sendChat("", "Force:"+diceObj.textLog.Force);
            }
        }
    }
    // Final result message: graphics when enabled, plain text otherwise.
    if (eote.defaults.globalVars.diceGraphicsChat === true ) {
        chatGlobal = chatGlobal + '<br>Roll:' + diceGraphicsResults;
        sendChat(characterPlayer, chatGlobal);
    } else {
        sendChat("Roll", diceTextResults);
    }
    //All DONE!!!
}
//---------------------->
eote.roll = {
boost : function(diceQty){
//Blue "Boost" die (d6)
//1 Blank
//2 Blank
//3 Success
//4 Advantage
//5 Advantage + Advantage
//6 Success + Advantage
var roll = 0;
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 6;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
} else {
roll = randomInteger(6);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.BLANK + s2 + "Boost Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.BLANK + s2 + "Boost Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.S + s2 + "Boost Success" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.A + s2 + "Boost Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.AA + s2 + "Boost Advantage x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 2;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success + Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.BOOST.SA + s2 + "Boost Success + Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
diceResult.advantage = diceResult.advantage + 1;
break;
}
}
return diceResult;
},
ability : function(diceQty){
//Green "Ability" die (d8)
//1 Blank
//2 Success
//3 Success
//4 Advantage
//5 Advantage
//6 Success + Advantage
//7 Advantage + Advantage
//8 Success + Success
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 8;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(8);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.BLANK + s2 + "Ability Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.S + s2 + "Ability Success" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.S + s2 + "Ability Success" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.A + s2 + "Ability Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.A + s2 + "Ability Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 1;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success + Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.SA + s2 + "Ability Success + Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
diceResult.advantage = diceResult.advantage + 1;
break;
case 7:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.AA + s2 + "Ability Advantage x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 2;
break;
case 8:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.ABILITY.SS + s2 + "Ability Success x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 2;
break;
}
}
return diceResult;
},
proficiency : function(diceQty){
//Yellow "Proficiency" die (d12)
//1 Blank
//2 Triumph
//3 Success
//4 Success
//5 Advantage
//6 Success + Advantage
//7 Success + Advantage
//8 Success + Advantage
//9 Success + Success
//10 Success + Success
//11 Advantage + Advantage
//12 Advantage + Advantage
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 12;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(12);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.BLANK + s2 + "Proficiency Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Triumph(+Success))";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.TRIUMPH + s2 + "Proficiency Triumph(+Success)" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.triumph = diceResult.triumph + 1;
diceResult.success = diceResult.success + 1;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.S + s2 + "Proficiency Success" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.S + s2 + "Proficiency Success" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.A + s2 + "Proficiency Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 1;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success + Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.SA + s2 + "Proficiency Success + Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
diceResult.advantage = diceResult.advantage + 1;
break;
case 7:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success + Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.SA + s2 + "Proficiency Success + Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
diceResult.advantage = diceResult.advantage + 1;
break;
case 8:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success + Advantage)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.SA + s2 + "Proficiency Success + Advantage" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 1;
diceResult.advantage = diceResult.advantage + 1;
break;
case 9:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.SS + s2 + "Proficiency Success x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 2;
break;
case 10:
diceResult.diceTextLog = diceResult.diceTextLog + "(Success x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.SS + s2 + "Proficiency Success x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.success = diceResult.success + 2;
break;
case 11:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.AA + s2 + "Proficiency Advantage x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 2;
break;
case 12:
diceResult.diceTextLog = diceResult.diceTextLog + "(Advantage x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.PROFICIENCY.AA + s2 + "Proficiency Advantage x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.advantage = diceResult.advantage + 2;
break;
}
}
return diceResult;
},
setback : function(diceQty){
//Black "Setback" die (d6)
//1 Blank
//2 Blank
//3 Failure
//4 Failure
//5 Threat
//6 Threat
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 6;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(6);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.BLANK + s2 + "Setback Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.BLANK + s2 + "Setback Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.F + s2 + "Setback Failure" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.F + s2 + "Setback Failure" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.T + s2 + "Setback Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.SETBACK.T + s2 + "Setback Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
}
}
return diceResult;
},
difficulty : function(diceQty){
//Purple "Difficulty" die (d8)
//1 Blank
//2 Failure
//3 Threat
//4 Threat
//5 Threat
//6 Failure + Failure
//7 Failure + Threat
//8 Threat + Threat
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 8;
}
for (i=1;i<=diceQty;i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(8);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.BLANK + s2 + "Difficulty Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.F + s2 + "Difficulty Failure" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.T + s2 + "Difficulty Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.T + s2 + "Difficulty Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.T + s2 + "Difficulty Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.FF + s2 + "Difficulty Failure x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 2;
break;
case 7:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure + Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.FT + s2 + "Difficulty Failure + Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
diceResult.threat = diceResult.threat + 1;
break;
case 8:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.DIFFICULTY.TT + s2 + "Difficulty Threat x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 2;
break;
}
}
return diceResult;
},
challenge : function(diceQty){
//Red "Challenge" die (d12)
//1 Blank
//2 Despair
//3 Failure
//4 Failure
//5 Threat
//6 Threat
//7 Failure + Failure
//8 Failure + Failure
//9 Threat + Threat
//10 Threat + Threat
//11 Failure + Threat
//12 Failure + Threat
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 12;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(12);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Blank)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.BLANK + s2 + "Challenge Blank" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Despair)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.DESPAIR + s2 + "Challenge Despair" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.despair = diceResult.despair + 1;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.F + s2 + "Challenge Failure" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.F + s2 + "Challenge Failure" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.T + s2 + "Challenge Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.T + s2 + "Challenge Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 1;
break;
case 7:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.FF + s2 + "Challenge Failure x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 2;
break;
case 8:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.FF + s2 + "Challenge Failure x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 2;
break;
case 9:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.TT + s2 + "Challenge Threat x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 2;
break;
case 10:
diceResult.diceTextLog = diceResult.diceTextLog + "(Threat x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.TT + s2 + "Challenge Threat x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.threat = diceResult.threat + 2;
break;
case 11:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure + Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.FT + s2 + "Challenge Failure + Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
diceResult.threat = diceResult.threat + 1;
break;
case 12:
diceResult.diceTextLog = diceResult.diceTextLog + "(Failure + Threat)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.CHALLENGE.FT + s2 + "Challenge Failure + Threat" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.failure = diceResult.failure + 1;
diceResult.threat = diceResult.threat + 1;
break;
}
}
return diceResult;
},
force : function(diceQty){
//White "Force" die (d12)
//1 Light
//2 Light
//3 Light + Light
//4 Light + Light
//5 Light + Light
//6 Dark
//7 Dark
//8 Dark
//9 Dark
//10 Dark
//11 Dark
//12 Dark + Dark
var roll = 0;
var diceTextLog = "";
var diceGraphicsLog = "";
var diceResult = {
success : 0,
failure : 0,
advantage : 0,
threat : 0,
triumph : 0,
despair : 0,
light : 0,
dark : 0,
diceGraphicsLog : "",
diceTextLog : ""
};
var i = 0;
var s1 = '<img src="';
var s2 = '" title="';
var s3 = '" height="';
var s4 = '" width="';
var s5 = '"/>';
if (eote.defaults.globalVars.diceTestEnabled === true) {
diceQty = 12;
}
for (i=1; i<=diceQty; i++) {
if (eote.defaults.globalVars.diceTestEnabled === true) {
roll = roll + 1;
}
else {
roll = randomInteger(12);
}
switch(roll) {
case 1:
diceResult.diceTextLog = diceResult.diceTextLog + "(Light)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.L + s2 + "Force Light" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.light = diceResult.light + 1;
break;
case 2:
diceResult.diceTextLog = diceResult.diceTextLog + "(Light)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.L + s2 + "Force Light" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.light = diceResult.light + 1;
break;
case 3:
diceResult.diceTextLog = diceResult.diceTextLog + "(Light x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.LL + s2 + "Force Light x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.light = diceResult.light + 2;
break;
case 4:
diceResult.diceTextLog = diceResult.diceTextLog + "(Light x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.LL + s2 + "Force Light x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.light = diceResult.light + 2;
break;
case 5:
diceResult.diceTextLog = diceResult.diceTextLog + "(Light x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.LL + s2 + "Force Light x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.light = diceResult.light + 2;
break;
case 6:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 7:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 8:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 9:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 10:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 11:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.D + s2 + "Force Dark" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 1;
break;
case 12:
diceResult.diceTextLog = diceResult.diceTextLog + "(Dark x2)";
diceResult.diceGraphicsLog = diceResult.diceGraphicsLog + s1 + eote.defaults.graphics.FORCE.DD + s2 + "Force Dark x2" + s3 + eote.defaults.globalVars.diceGraphicsChatSize + s4 + eote.defaults.globalVars.diceGraphicsChatSize + s5;
diceResult.dark = diceResult.dark + 2;
break;
}
}
return diceResult;
}
}
eote.events = function() {
    // Register the Roll20 event listeners this script depends on.
    // New characters get the sheet defaults applied on creation.
    var handleAddCharacter = function(characterObj) {
        eote.setCharacterDefaults(characterObj);
    };
    // Chat messages are only interesting when they are API commands.
    var handleChatMessage = function(msg) {
        if (msg.type != 'api') {
            return;
        }
        eote.process.setup(msg.content, msg.who, msg.playerid);
    };
    on("add:character", handleAddCharacter);
    on("chat:message", handleChatMessage);
}
// Roll20 entry point: initialise the script once the campaign sandbox
// has finished loading. The commented call below is a leftover manual
// test invocation kept for debugging.
on('ready', function() {
    eote.init();
    //eote.process.setup('!eed characterID(-JTu_xSU9-LVHyjcs7qx) crit(roll)', 'Steve', 'playerID');
});
|
#!/bin/bash
set -e
# Print the command-line usage summary to stdout and exit with status 2.
# (No comments may appear inside the heredoc — they would be printed.)
usage() {
cat <<EOF
$0
--sdk <SDK file path>
--system <system sdk file path>
--support <support library file path>
EOF
exit 2
}
banner() {
# Print a framed progress banner naming the component being updated ($1).
local rule="**************************************************"
echo "$rule"
echo "Updating $1 "
echo "$rule"
}
update_sdk() {
# Refresh current/android.jar, uiautomator.jar and framework.aidl from
# the SDK zip, if one was supplied via --sdk. No-op otherwise.
if [ -f "$SDK" ]
then
banner "SDK"
cd "$ROOT_DIR/current"
rm -f android.jar uiautomator.jar framework.aidl
# Quote the archive path (may contain spaces) and single-quote the
# member patterns so the shell cannot glob them before unzip sees them.
unzip -j "$SDK" '*/android.jar' '*/uiautomator.jar' '*/framework.aidl'
fi
}
update_system_sdk() {
# Replace system_current/android.jar with the system SDK jar supplied
# via --system, if any. Paths are quoted to survive whitespace.
if [ -f "$SYSTEM_SDK" ]
then
banner "system SDK"
cp -f "$SYSTEM_SDK" "$ROOT_DIR/system_current/android.jar"
fi
}
update_support_lib() {
# Unpack the support library zip (from --support) into current/ and
# strip everything not needed for compilation: duplicate jars, samples,
# sources, and build/IDE metadata files.
if [ -f "$SUPPORT" ]
then
banner "support library"
rm -rf "$ROOT_DIR/current/support/"
cd "$ROOT_DIR/current"
unzip "$SUPPORT" >/dev/null
# Remove duplicates
rm -f support/v7/appcompat/libs/android-support-v4.jar
rm -f support/multidex/instrumentation/libs/android-support-multidex.jar
# Remove samples
rm -rf support/samples
# Remove source files. Group the -name tests and use -exec instead of
# "| xargs rm": the old pipeline broke on paths containing whitespace
# and made rm error out when no files matched.
find support \( -name "*.java" \
-o -name "*.aidl" \
-o -name AndroidManifest.xml \) \
-exec rm -f {} +
# Other misc files we don't need
find support \( -name "*.gradle" \
-o -name ".classpath" \
-o -name ".project" \
-o -name "project.properties" \
-o -name "source.properties" \
-o -name ".readme" \
-o -name "README.txt" \
-o -name "package.html" \
-o -name "NOTICE.txt" \) \
-exec rm -f {} +
# Now we can remove empty dirs
find . -type d -empty -delete
fi
}
main() {
# Parse command-line flags into SDK / SYSTEM_SDK / SUPPORT (exported so
# the update_* helpers see them), resolve the script's own directory,
# then run each update step. Steps whose file was not given are no-ops.
while [ "$#" -gt 0 ]
do
case "$1" in
--help|-h)
usage
;;
--sdk)
export SDK="$2"
shift; shift
;;
--system)
export SYSTEM_SDK="$2"
shift; shift
;;
--support)
export SUPPORT="$2"
shift; shift
;;
-*)
usage
;;
*)
break
;;
esac
done
# Quote the dirname expansion so a script path containing spaces works.
ROOT_DIR=$(realpath "$(dirname "$0")")
update_sdk
update_system_sdk
update_support_lib
}
main $*
|
// Collect the elements of `data` that sit at odd indices (1, 3, 5, ...).
// NOTE(review): `data` is declared outside this snippet; the use of
// `data.length` and the List<String> target suggest a String[] — confirm
// at the declaration site.
List<String> result = new ArrayList<>();
for (int i = 0; i < data.length; i++) {
    if (i % 2 != 0) {
        result.add(data[i]);
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.