package com.javakc.pms.dispord.vo;
import lombok.Data;
import java.util.Date;
// Entity class for query conditions (order name plus date range)
@Data
public class DispOrdQuery {
private String orderName;
private String beginDate;
private String endDate;
}
|
#!/bin/bash
if (($# != 3)); then
echo "Usage: ./build.sh db-data-directory app-log-directory version"
exit 1
fi
if [ ! -e ./server/TopCoderChallengeNotifierBackend-${3}.jar ]; then
    echo "TopCoderChallengeNotifierBackend-${3}.jar is missing in server folder."
    exit 1
fi
if [ "$(sudo docker network ls | grep "tpcn-net" | grep "bridge")" ]; then
echo "Using existing tpcn-net network."
else
sudo docker network create --driver bridge tpcn-net
fi
if [ "$(sudo docker container ls | grep "tpcn-mongodb")" ]; then
echo "tpcn-mongodb already present using it."
else
sudo docker run --rm -d \
--network tpcn-net \
--mount type=bind,src=${1},target=/data/db \
--name tpcn-mongodb mongo:4.2
fi
sudo docker build --network tpcn-net --tag tpcn-api:${3} .
sudo docker run --rm -d --network tpcn-net -p 8080:8080 \
--mount type=bind,src=${2},target=/tpcn/app/logs \
--name tpcn-api-run tpcn-api:${3} -DLOG_HOME="/tpcn/app/logs" \
TopCoderChallengeNotifierBackend-${3}.jar \
--db.host=tpcn-mongodb --JKS_KEYSTORE_ALIAS=dummy --JKS_KEYSTORE_FILE=dummy.jks \
--JKS_KEYSTORE_PASSWORD=dummy --APP_SENDER_MAIL=dummy@gmail.com \
--APP_SENDER_MAIL_PASSWORD=dummy --schedule_rate=1800000 \
--server.servlet.contextPath=/v1
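# A hedged usage sketch (the paths and version below are assumptions, adjust to your setup):
#   sudo ./build.sh /srv/tpcn/mongo-data /srv/tpcn/app-logs 1.0.0
# This reuses or creates the tpcn-net bridge network and the tpcn-mongodb
# container, builds the tpcn-api:1.0.0 image, and runs it on port 8080.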
|
// SoftwarearchitekturTeam/TypeTogether
package de.hswhameln.typetogether.networking.api.exceptions;
public class FunctionalException extends Exception {
public FunctionalException(String message) {
super(message);
}
public FunctionalException() {
super();
}
}
|
echo * |
def reverse(head):
prev = None
current = head
while current is not None:
next_node = current.next
current.next = prev
prev = current
current = next_node
    return prev
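# A minimal usage sketch (assumes list nodes expose .val and .next; Node is illustrative, not part of the original):
class Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

head = Node(1, Node(2, Node(3)))
node = reverse(head)
while node is not None:  # prints 3, 2, 1
    print(node.val)
    node = node.next
|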
#!/usr/bin/env bash
# COCO-pretrained model provided by the r50_dconvDETR_C5_pretrained_coco_Q100 setting
set -x
PY_ARGS=${@:1}
conda activate pytorch
which python
EXP_DIR=exps/refcoco/r101_det
python3.8 -u main_vg.py \
--pretrained_model "./data/MODEL_ZOO/detr-r101-2c7b67e5.pth"\
--num_feature_levels 1\
--dataset refcoco_unc\
--train_split train\
--test_split val testA testB\
--dec_layers 6\
--backbone resnet101\
--aux_loss \
--img_size 640\
--max_img_size 640\
--epochs 90\
--lr_drop 60\
--output_dir ${EXP_DIR} \
${PY_ARGS}
EXP_DIR=exps/refcoco/r101
python3.8 -u main_vg.py \
--pretrained_model "./SAVED_MODEL/refcoco_101_det/RefTR_refcoco_101/checkpoint_best.pth"\
--num_feature_levels 1\
--masks\
--lr 1e-5\
--lr_mask_branch_proj 10\
--dataset refcoco_unc\
--train_split train\
--test_split val testA testB\
--dec_layers 6\
--backbone resnet101\
--aux_loss \
--img_size 640\
--max_img_size 640\
--epochs 40\
--lr_drop 30\
    --output_dir ${EXP_DIR} \
    ${PY_ARGS}
|
#!/bin/bash
set -eux;
INDEX_NAME="inventoryitems";
curl -H'Content-Type: application/json' -XPOST 'localhost:9200/'"$INDEX_NAME"'/_delete_by_query?conflicts=proceed' -d' { "query": { "match_all": {} }}'
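# Optionally verify the index is now empty (a sketch using the standard Elasticsearch _count API):
# curl 'localhost:9200/'"$INDEX_NAME"'/_count'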
|
String loadHomePage();
String loadGauge();
void getQueryString();
void getSettings();
void setSettings(String, String);
void startAccesPoint();
void clearSettings();
void connectToWifi();
String load404(ESP32WebServer);
String loadDashboard();
void getAndSetTime();
void startTimer();
void rotateplants();
String getTimeString();
#define ANALOGLIGHTPIN1 1 // analog input pin for light sensor 1
|
#!/bin/bash
#SBATCH -J Act_cosper_1
#SBATCH --mail-user=eger@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1 # Number of cores
#SBATCH --mem-per-cpu=6000
#SBATCH -t 23:59:00 # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins
#module load intel python/3.5
python3 /home/se55gyhe/Act_func/sequence_tagging/arg_min/PE-my.py cosper 247 Adadelta 4 0.11466301815818268 1.020655355224463 he_normal 0.05
|
# coding=utf-8
from data_packer.field._base import _IField, BaseField
from data_packer.field.single import OptionalField
from data_packer.container import BaseContainer
from data_packer import err, constant
class SelectorField(_IField):
valid_field_cls = (OptionalField,)
def __init__(self, fields, at_most=999999, at_least=0):
"""
        :param fields: list of field objects, taken in iteration order
        :param at_most: take at most this many fields, i.e. stop once at_most fields have been taken
        :param at_least: the minimum number of fields that must be taken
:type fields: list[OptionalField]
:type at_most: int
:type at_least: int
:return:
"""
self.fields = fields
self.at_most = at_most
self.at_least = at_least
if at_least > at_most:
raise err.DataPackerProgramError('at_least({}) > at_most({})'.format(at_least, at_most))
if not all([
isinstance(field, self.valid_field_cls)
for field in fields
]):
raise err.DataPackerProgramError('SelectorField Only consists of {}'.format(self.valid_field_cls))
def __str__(self):
return '{cls_name}: {property}'.format(
cls_name=self.__class__,
property=self.__dict__
)
def run(self, src, dst):
got = 0
for field in self.fields:
try:
field.run(src, dst)
got += 1
except err.DataPackerError:
continue
if got >= self.at_most:
break
if got < self.at_least:
raise err.DataPackerLackFieldError('field({}): got {}, but need {}'.format(self, got, self.at_least))
return True
class CompositedField(BaseField):
def __init__(self, fields, src_sub_container_cls, dst_sub_container, src_name, dst_name=None, overwrite=constant.OverwriteMode.OVERWRITE):
"""
:param fields:
:type fields:
:param src_sub_container_cls:
:type src_sub_container_cls: type(BaseContainer)
:param dst_sub_container:
:type dst_sub_container: BaseContainer
:param src_name:
:type src_name: object
:param dst_name:
:type dst_name: object
:param overwrite:
:type overwrite:
"""
super(CompositedField, self).__init__(src_name, dst_name, overwrite, None, None)
self.fields = fields
self.src_sub_container_cls = src_sub_container_cls
self.dst_sub_container = dst_sub_container
def __str__(self):
return '{cls_name}: {property}'.format(
cls_name=self.__class__,
property=self.__dict__
)
def _get_value(self, src):
src_raw_data = super(CompositedField, self)._get_value(src)
src_sub_container = self.src_sub_container_cls(src_raw_data)
for field in self.fields:
field.run(src_sub_container, self.dst_sub_container)
return self.dst_sub_container.raw_data()
|
#! /usr/bin/env sh
set -eux
STABLE=6.6.0
LATEST=7.0.0
systemctl stop docker.service
mount -t tmpfs tmpfs /var/lib/docker/ -o size=100%
systemctl start docker.service
gcloud auth configure-docker
TEMP=$(mktemp -d)
mount -t tmpfs tmpfs ${TEMP}/ -o size=100%
REPOSITORY=${REPOSITORY:-chainercv}
case ${REPOSITORY} in
chainercv)
CUPY=${CHAINER}
cp -a . ${TEMP}/chainercv
;;
chainer)
CHAINER=local
if git merge-base --is-ancestor origin/v6 HEAD; then
CUPY=stable
elif git merge-base --is-ancestor origin/master HEAD; then
CUPY=latest
fi
cp -a . ${TEMP}/chainer
mv ${TEMP}/chainer/chainercv/ ${TEMP}/
;;
cupy)
if git merge-base --is-ancestor origin/v6 HEAD; then
CHAINER=stable
elif git merge-base --is-ancestor origin/v7 HEAD; then
CHAINER=latest
fi
CUPY=local
cp -a . ${TEMP}/cupy
mv ${TEMP}/cupy/chainercv/ ${TEMP}/
;;
esac
cd ${TEMP}/
case ${CHAINER} in
stable)
echo pip${PYTHON} install chainer==${STABLE} >> install.sh
;;
latest)
echo pip${PYTHON} install chainer==${LATEST} >> install.sh
;;
master)
CHAINER_MASTER=$(git ls-remote https://github.com/chainer/chainer.git master | cut -f1)
echo pip${PYTHON} install \
git+https://github.com/chainer/chainer.git@${CHAINER_MASTER}#egg=chainer >> install.sh
;;
local)
echo pip${PYTHON} install -e chainer/ >> install.sh
;;
esac
case ${CUPY} in
stable)
echo pip${PYTHON} install cupy-cuda92==${STABLE} >> install.sh
;;
latest)
echo pip${PYTHON} install cupy-cuda92==${LATEST} >> install.sh
;;
master)
CUPY_MASTER=$(gsutil -q cp gs://tmp-asia-pfn-public-ci/cupy/wheel/master -)
gsutil -q cp gs://tmp-asia-pfn-public-ci/cupy/wheel/${CUPY_MASTER}/cuda9.2/*.whl .
echo pip${PYTHON} install cupy-*-cp${PYTHON}*-cp${PYTHON}*-linux_x86_64.whl >> install.sh
;;
local)
echo pip${PYTHON} install -e cupy/ >> install.sh
;;
esac
echo pip${PYTHON} install -e chainercv/ >> install.sh
if [ ${OPTIONAL_MODULES} -gt 0 ]; then
DOCKER_TAG=devel
else
DOCKER_TAG=devel-minimal
fi
DOCKER_IMAGE=asia.gcr.io/pfn-public-ci/chainercv:${DOCKER_TAG}
docker pull ${DOCKER_IMAGE} || true
docker build \
--cache-from ${DOCKER_IMAGE} \
--tag ${DOCKER_IMAGE} \
chainercv/.pfnci/docker/${DOCKER_TAG}/
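# A hedged usage sketch (the values are assumptions; the CI environment is
# expected to provide PYTHON, REPOSITORY, CHAINER and OPTIONAL_MODULES):
#   PYTHON=3 REPOSITORY=chainercv CHAINER=stable OPTIONAL_MODULES=1 sh <this-script>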
|
package ghtorrent.models
case class Issue(id: Int, pullRequestId: Int)
|
#!/bin/bash -f
xv_path="/media/natu/data/xilinx/Vivado/2016.4"
ExecStep()
{
"$@"
RETVAL=$?
if [ $RETVAL -ne 0 ]
then
exit $RETVAL
fi
}
ExecStep $xv_path/bin/xsim tb_top_behav -key {Behavioral:memrw:Functional:tb_top} -tclbatch tb_top.tcl -view /media/natu/data/proj/myproj/NPU/fpga_implement/npu8/tb_top_behav.wcfg -log simulate.log
|
#! /bin/sh
logfile=/tmp/bb-jail-custom.log
echo -n Setting up resolv.conf...
cat << EOF 2>/dev/null > /etc/resolv.conf
search dragonflybsd.org backplane.com
nameserver 10.0.0.25
nameserver 10.0.0.2
EOF
if [ $? -ne 0 ]; then
echo failed!
exit 1
else
echo success!
fi
echo -n Testing inet connection...
if ! ping -qq -o www.google.es >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo success!
fi
if [ ! -x /usr/local/sbin/pkg ]; then
echo -n Bootstrapping pkg...
cd /usr
if ! make pkg-bootstrap >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo success!
fi
fi
echo -n "Installing packages... "
if [ ! -x /usr/local/bin/git ]; then
if ! pkg install -y git-lite >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo -n "git-lite "
fi
fi
if [ ! -x /usr/local/bin/mkisofs ]; then
if ! pkg install -y cdrtools >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo -n "cdrtools..."
fi
fi
echo "success!"
if [ ! -d /usr/src ]; then
echo -n Checking out src...
cd /usr
if ! make src-create-shallow >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo success!
fi
fi
if [ ! -d /usr/dports ]; then
echo -n Checking out dports...
cd /usr
if ! make dports-create-shallow >> ${logfile} 2>&1; then
echo failed!
exit 1
else
echo success!
fi
fi
if [ ! -x /root/dobuild.sh ]; then
echo -n Generating dobuild.sh script...
cat <<EOF 2>/dev/null > /root/dobuild.sh
#!/bin/sh
cd /usr/src
case "\$1" in
"release")
cd nrelease
make \$1
;;
"buildkernel"|"nativekernel")
make -j12 \$1 KERNCONF=\$2
;;
"buildworld")
make -j12 \$1
;;
*)
echo Bad build option
;;
esac
EOF
if [ $? -ne 0 ]; then
echo failed!
exit 1
else
echo success!
fi
fi
|
/**
* @author WMXPY
* @namespace Color
* @description Color
* @override Unit Test
*/
import { expect } from "chai";
import * as Chance from "chance";
import { Color } from "../../src";
describe('Given {Color} Class', (): void => {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const chance: Chance.Chance = new Chance('color-color');
it('should be able to construct', (): void => {
const instance: Color = Color.black();
expect(instance).to.be.instanceOf(Color);
});
});
|
cd terraform \
&& rm -rf .terraform \
&& terraform init \
&& yes yes | terraform destroy \
&& yes yes | terraform apply
|
import React, { useState } from 'react';
function Modal() {
const [text, setText] = useState("");
const [showModal, setShowModal] = useState(false);
const handleSubmit = () => {
// Do something with submitted text
}
return (
<>
<button onClick={() => setShowModal(true)}>Open Modal</button>
{showModal && (
<div>
<input type="text" value={text} onChange={e => setText(e.target.value)} />
<button onClick={handleSubmit}>Submit</button>
</div>
)}
</>
);
}
export default Modal; |
require "dtext"
require "nokogiri"
class Danbooru
module HasDTextFields
def html_body
DTextRagel.parse(body)
end
def pretty_body
nodes = Nokogiri::HTML.fragment(html_body)
nodes.children.map do |node|
case node.name
when "i"
"*#{node.text.gsub(/\*/, "\*")}*"
when "b"
"**#{node.text.gsub(/\*\*/, "\*\*")}**"
when "div", "blockquote"
# no-op
nil
else
node.text
end
end.compact.take(2).join("\n\n")
end
end
end
|
#!/bin/bash
source py3/bin/activate
python online.py prediction_o.obj
deactivate
|
function(page, done) {
this.fetch('https://orf.at', { responseFormat: 'text' }, (response) => {
done(this.createResult('FETCH', `response from orf.at: ${response.length}`, 'info'));
});
}
|
const step = (state = {}, action) => {
    if (state.id !== action.id) {
        return state
    }
switch (action.type) {
case 'STEP_DETUNE_DECREASE':
return {
...state,
detune: state.detune - 10
}
case 'STEP_DETUNE_INCREASE':
return {
...state,
detune: state.detune + 10
}
case 'STEP_FREQUENCY_DECREASE':
return {
...state,
frequency: state.frequency - 100
}
case 'STEP_FREQUENCY_INCREASE':
return {
...state,
frequency: state.frequency + 100
}
case 'TOGGLE_STEP_ACTIVE':
return {
...state,
selected: !state.selected
}
default:
return state;
}
}
const steps = (state = false, action) => {
if (!state) {
return Array(16).fill().map((_, id) => {
return {
id,
selected: false,
frequency: Math.floor(Math.random() * 2000 + 1),
detune: Math.floor(Math.random() * 100 + 1)
};
});
}
    switch (action.type) {
        case 'STEP_DETUNE_DECREASE':
        case 'STEP_DETUNE_INCREASE':
        case 'STEP_FREQUENCY_DECREASE':
        case 'STEP_FREQUENCY_INCREASE':
        case 'TOGGLE_STEP_ACTIVE':
            return state.map(s => step(s, action));
default:
return state;
}
}
export default steps
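// A minimal usage sketch (assumed Redux conventions; not part of the original file):
// const initial = steps(false, { type: '@@INIT' });  // lazily builds 16 random steps
// const next = steps(initial, { type: 'TOGGLE_STEP_ACTIVE', id: 3 });
// // next[3].selected is now true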
|
package com.haufelexware.report.nunit.v25.dom;
import javax.xml.bind.annotation.*;
/**
 * An NUnit test case.
 */
@XmlAccessorType(XmlAccessType.FIELD)
public class NUnitTestCase extends NUnitTest {}
|
// amazoyer/ml-ranking-module
package com.datafari.ranking.training;
import java.io.IOException;
import org.apache.solr.client.solrj.SolrClient;
import com.lucidworks.spark.rdd.SolrJavaRDD;
/**
 * Provides all needed Solr clients:
 * - Spark Solr RDD
 * - CloudSolrJ client
 * - simple HTTP client
 */
public interface ISolrClientProvider {
public SolrClient getSolrClient() throws IOException;
public SolrJavaRDD getSolrJavaRDD() throws IOException;
public SolrHttpClient getSolrHttpClient() throws IOException;
public void close();
}
|
import xivapi from "./XIVAPI";
import Popup from "./Popup";
import ButtonLoading from "./ButtonLoading";
class AccountCharacters
{
constructor()
{
this.uiAddCharacterResponse = $(".character_add_response");
}
watch()
{
if (mog.path != "account") {
return;
}
this.handleNewCharacterSearch();
}
/**
* Handles adding a new character
*/
handleNewCharacterSearch()
{
const $button = $(".character_add");
// add character clicked
$button.on("click", event => {
// grab entered info
const character = {
string: $("#character_string").val().trim(),
server: $("#character_server").val().trim(),
};
// validate IDs
let lodestoneId = null;
if (character.string.indexOf("finalfantasyxiv.com") > -1) {
character.string = character.string.split("/");
character.string = character.string[5];
lodestoneId = character.string;
}
if (character.string.indexOf(" ") == -1) {
lodestoneId = character.string;
}
if (character.string.length == 0) {
Popup.error("Nothing entered?", "I think you forgot to type something...");
return;
}
ButtonLoading.start($button);
// if lodestone id, we good to go
if (lodestoneId) {
this.handleNewCharacterViaLodestoneId(lodestoneId);
return;
}
// else search and find a lodestone id.
const name = character.string.split(" ");
this.uiAddCharacterResponse.html("Searching Lodestone for your character...");
fetch(`/lodestone/search/character/${character.server}/${name[0]}/${name[1]}`)
.then(response => response.json())
.then(data => this.handleNewCharacterViaLodestoneId(data.id))
.catch(err => {
Popup.error("Not Found (code 8)", "Could not find your character on Lodestone, try entering the Lodestone URL for your character.");
ButtonLoading.finish($button);
this.uiAddCharacterResponse.html("");
});
});
}
/**
* Handle a character via their lodestone id
*/
handleNewCharacterViaLodestoneId(lodestoneId, reCalled)
{
const $button = $(".character_add");
this.uiAddCharacterResponse.html("Searching Lodestone for your character...");
fetch(`/lodestone/character/${lodestoneId}`)
.then(response => response.json())
.then(data => {
this.uiAddCharacterResponse.html("Character found, verifying auth code.");
const verifyCodeIdx = data.bio.search(verify_code);
console.log(verifyCodeIdx);
if (verifyCodeIdx === -1) {
Popup.error("Auth Code Not Found", `Could not find your auth code (${verify_code}) on your characters profile, try again!`);
this.uiAddCharacterResponse.html("");
ButtonLoading.finish($button);
return;
}
this.uiAddCharacterResponse.html("Auth code found, adding character...");
$.ajax({
url: mog.urls.characters.add.replace("-id-", lodestoneId),
success: response => {
if (response === true) {
Popup.success("Character Added!", "Your character has been added, the page will refresh in 3 seconds.");
Popup.setForcedOpen(true);
setTimeout(() => {
location.reload();
}, 3000);
return;
}
Popup.error("Character failed to add", `Error: ${response.Message}`);
/*document.getElementById(character_string).reset(); */
},
error: (a, b, c) => {
Popup.error("Something Broke (code 145)", "Could not add your character, please hop on Discord and complain to Kara");
console.error(a, b, c);
},
complete: () => {
this.uiAddCharacterResponse.html("");
ButtonLoading.finish($button);
}
});
})
.catch(err => {
Popup.error("Character failed to add", `Error: ${err}`);
});
}
}
export default new AccountCharacters;
|
#!/usr/bin/env python
# finddup.py
import os
import sys
import re
import string
import hashlib
import argparse
from functools import partial
# borrowed from http://goo.gl/kFJZKb
# which originally borrowed from http://goo.gl/zeJZl
def human2bytes(s):
"""
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
"""
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
letter = s[-1].strip().upper()
num = s[:-1]
if letter not in symbols:
return -1
try:
num = float(num)
except ValueError:
return -1
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
return int(num * prefix[letter])
class HelpFormatterMixin(argparse.RawDescriptionHelpFormatter,
argparse.ArgumentDefaultsHelpFormatter):
"""Formatter class to correctly display example in the epilog."""
def hash_md5(path, name, blocksize):
"""hash_md5(path, name, blocksize) -> <md5digest>
Returns the md5 checksum of the file's first blocksize block
"""
filename = os.path.join(path, name)
if not os.path.isfile(filename):
return 'NotARegularFile'
flsize = os.stat(filename).st_size
readbytes = None if flsize < blocksize else blocksize
with open(filename, 'rb') as fl:
return hashlib.md5(fl.read(readbytes)).hexdigest()
def hash_fuzzy(ignored, name):
"""First ^normalize^ a filename and then return the md5 digest of the
normalized name.
Normalizing means:
* converting the filename to lowercase & removing the extension
* removing all spaces and punctuation in the filename
"""
name, _ = os.path.splitext(name.lower())
name = name.replace('&', 'and')
    name = name.translate(str.maketrans('', '', string.whitespace + string.punctuation))
return hashlib.md5(name.encode('utf-8')).hexdigest()
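# Illustrative note (not in the original): 'My File (1).MP3' and 'myfile1.wav'
# both normalize to 'myfile1', so hash_fuzzy flags them as likely duplicates.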
def main():
parser = argparse.ArgumentParser(
usage = "%s [OPTIONS] DIRECTORIES ..." % sys.argv[0],
description = 'Find duplicate files within a list of directories',
formatter_class = HelpFormatterMixin,
epilog = (
"Example: find all likely duplicate files under the current\n"
"directory using the md5 checksums of the first 1K bytes of\n"
"the files to identify duplicates.\n"
"\t$ %s -m -b 1K ./") % sys.argv[0]
)
parser.add_argument('DIRECTORIES', nargs='+', help="directories to search")
parser.add_argument('-e', '--exclude', default='(?!.*)',
help='exclude files where the path matches the provided regex pattern')
parser.add_argument('-o', '--only', default='.*',
help='only consider files where the name matches the provided regex pattern')
ex_group = parser.add_mutually_exclusive_group()
ex_group.add_argument('-n', '--name', action="store_true", default=True,
help="use exact filenames (fastest)")
ex_group.add_argument('-f', '--fuzzy', action="store_true",
help="use fuzzy match of file names")
parser.add_argument('-m', '--md5', action="store_true",
help="use md5 checksums (slowest)")
    parser.add_argument('-B', '--blocksize', default='512K',
help=("limit md5 checksums to first BLOCKSIZE bytes. "
"Recognizes human readable formats, eg: 1G, 32M"))
    parser.add_argument('-I', '--inverse', action="store_true", default=False,
                        help=("Invert the report, ie: report files "
                              "that *do not* have a duplicate copy."))
args = parser.parse_args()
blocksize = human2bytes(args.blocksize if args.blocksize else '512K')
if args.md5:
hash_fn = partial(hash_md5, blocksize=blocksize)
elif args.fuzzy:
hash_fn = hash_fuzzy
else: # args.name <- default
hash_fn = lambda _, name: name
path_pattern = re.compile(args.exclude)
name_pattern = re.compile(args.only)
# - begin hashing
file_hash = {}
nfiles = 0
for directory in args.DIRECTORIES:
for root, subdirs, files in os.walk(directory):
for name in filter(name_pattern.search, files):
path = os.path.join(root, name)
if path_pattern.search(path):
continue
nfiles += 1
file_hash.setdefault(hash_fn(root, name), []).append(path)
if args.inverse:
report = {k: v for k, v in file_hash.items() if len(v) == 1}
else:
report = {k: v for k, v in file_hash.items() if len(v) > 1}
for k, v in report.items():
print('%s\n\t%s' % (k, '\n\t'.join(sorted(v))))
    print()
    if args.inverse:
        if report:
            print('Processed {} files and found {} files without duplicates'.format(nfiles, len(report)))
        else:
            print('Processed {} files and found all files duplicated'.format(nfiles))
    else:
        if report:
            print('Processed {} files and found {} possible duplicates'.format(nfiles, len(report)))
        else:
            print('Processed {} files and found no duplicates'.format(nfiles))
if __name__ == '__main__':
main()
|
package gr.sullenart.games.fruitcatcher.models;
public class FruitType {
public static final float SEASONAL_FRUITS_RATIO = 0.25f;
public static String [] fruitNames = {
"Strawberry", "Cherries", "Blueberry", "PassionFruit",
"Watermelon", "Grapes", "Peach", "Fig",
"Apple", "Pear", "Banana", "Coconut",
"Orange", "Kiwi", "Pineapple", "Papaya"};
private static int [][] seasonalFruits = new int[][] {
{ 0, 1, 2, 3},
{ 4, 5, 6, 7},
{ 8, 9, 10, 11},
{ 12, 13, 14, 15}
};
public static int [] getFruitsInSeason(int season) {
return seasonalFruits[season];
}
public static boolean isInSeason(int fruit, int season) {
int [] fruits = seasonalFruits[season];
for(int f: fruits) {
if (f == fruit) {
return true;
}
}
return false;
}
} |
#include "Window.h"
#include "Input.h"
#include "Core/Application/Application.h"
static void ErrorCallback(int error_code, const char* error_msg)
{
ENGINE_LOG("GLFW Error: %s (%i)", error_msg, error_code);
}
// ------------------------------------------------------------------------------
float Window::GetGLFWTime() const
{
return (float)glfwGetTime();
}
// ------------------------------------------------------------------------------
Window::~Window()
{
ENGINE_LOG("Terminating GLFW");
glfwDestroyWindow(m_Window);
glfwTerminate();
}
void Window::Init()
{
// -- GLFW Initialization --
glfwSetErrorCallback(ErrorCallback);
if (!glfwInit())
{
ENGINE_LOG("GLFW Initialization Failed\n");
return;
}
// -- GLFW Hints --
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 6);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#ifdef _DEBUG
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GLFW_TRUE);
#endif
// -- Window Creation --
ENGINE_LOG("Creating Window '%s' of %ix%i\n", m_Name.c_str(), m_Width, m_Height);
m_Window = glfwCreateWindow(m_Width, m_Height, m_Name.c_str(), NULL, NULL);
if (!m_Window)
{
ENGINE_LOG("glfwCreateWindow() failed\n");
return;
}
// -- Graphics Context Creation --
glfwMakeContextCurrent(m_Window);
// -- GLFW Window User ptr & VSYNC --
glfwSetWindowUserPointer(m_Window, this);
SetVSYNC(true);
// -- Set GLFW Callbacks --
SetGLFWEventCallbacks();
// Load all OpenGL functions using the glfw loader function
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
ENGINE_LOG("Failed to initialize OpenGL context\n");
return;
}
}
void Window::Update()
{
// -- Call Platform Callbacks --
glfwPollEvents();
// -- Present image on Screen --
glfwSwapBuffers(m_Window);
}
void Window::ResizeWindow(uint width, uint height)
{
m_Width = width;
m_Height = height;
Application::Get().OnWindowResize(width, height);
}
void Window::CloseWindow()
{
Application::Get().CloseApplication();
}
void Window::SetVSYNC(bool enabled)
{
enabled ? glfwSwapInterval(1) : glfwSwapInterval(0);
m_VSYNC = enabled;
}
// ------------------------------------------------------------------------------
void Window::SetGLFWEventCallbacks() const
{
// Window/Application Events
glfwSetWindowSizeCallback(m_Window, [](GLFWwindow* window, int w, int h)
{
((Window*)glfwGetWindowUserPointer(window))->ResizeWindow(w, h);
});
glfwSetWindowCloseCallback(m_Window, [](GLFWwindow* window)
{
((Window*)glfwGetWindowUserPointer(window))->CloseWindow();
});
// Mouse Events
glfwSetMouseButtonCallback(m_Window, [](GLFWwindow* window, int button, int action, int mods)
{
Input::SetMouseCallback(button, action);
});
glfwSetScrollCallback(m_Window, [](GLFWwindow* window, double xOff, double yOff)
{
Input::SetScrollCallback(xOff, yOff);
});
glfwSetCursorPosCallback(m_Window, [](GLFWwindow* window, double xPos, double yPos)
{
Input::SetCursorCallback(xPos, yPos);
});
// Key Events
glfwSetKeyCallback(m_Window, [](GLFWwindow* window, int key, int scancode, int action, int mods)
{
Input::SetKeyCallback(key, action);
});
//glfwSetCharCallback(m_Window, [](GLFWwindow* window, uint keycode)
// {
// WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
// KeyTypedEvent event(keycode);
// data.EventCallback(event);
// });
} |
// include/linear_solvers/assemblers/assembler_base.tcc
//! Assign global node indices
template <unsigned Tdim>
bool mpm::AssemblerBase<Tdim>::assign_global_node_indices(
unsigned nactive_node, unsigned nglobal_active_node) {
bool status = true;
try {
// Total number of active node (in a rank) and (rank) node indices
active_dof_ = nactive_node;
global_node_indices_ = mesh_->global_node_indices();
#ifdef USE_MPI
// Total number of active node (in all rank)
global_active_dof_ = nglobal_active_node;
// Initialise mapping vector
rank_global_mapper_.resize(active_dof_);
// Nodes container
const auto& nodes = mesh_->active_nodes();
for (int counter = 0; counter < nodes.size(); counter++) {
// Assign get nodal global index
rank_global_mapper_[counter] = nodes[counter]->global_active_id();
}
#endif
} catch (std::exception& exception) {
console_->error("{} #{}: {}\n", __FILE__, __LINE__, exception.what());
status = false;
}
return status;
} |
#include <iostream>
#include <string>
using namespace std;

// Convert lowercase ASCII letters in s to uppercase
string toUpper(string s)
{
for (int i=0; i<s.length(); i++)
if (s[i] >= 'a' && s[i] <= 'z')
s[i] = s[i] - 'a' + 'A';
return s;
}
// Driver Code
int main()
{
string s = "hello world";
cout << "String in Lowercase: " << s << endl;
cout << "String in Uppercase: " << toUpper(s);
} |
chmod -R 755 /opt/intel/openvino_2019.3.376/deployment_tools/inference_engine/demos/object_detection_demo_ssd_async
chown -R dsingal /opt/intel/openvino_2019.3.376/deployment_tools/inference_engine/demos/object_detection_demo_ssd_async
|
import readline from "readline";
import { Writable } from "stream";
export default class SecureStdin {
rl: readline.Interface;
// stdinHandler?: (input: string) => void;
mutableStdout: Writable;
muted = false;
constructor() {
this.mutableStdout = new Writable({
write: (chunk, encoding, callback) => {
if (!this.muted) {
process.stdout.write(chunk, encoding);
}
callback();
}
});
this.rl = readline.createInterface({
input: process.stdin,
output: this.mutableStdout,
            // Output is routed through mutableStdout so typed input (e.g. a
            // password) can be hidden by setting this.muted = true.
            terminal: true,
});
// this.rl.on('line', (line) => {
// if (this.stdinHandler) {
// this.stdinHandler(line);
// }
// });
}
}
export async function input(question: string): Promise<string> {
return new Promise(function (resolve, reject) {
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
rl.question(question, (answer) => {
rl.close();
resolve(answer);
});
});
}
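// A minimal usage sketch (assumes an async caller):
//   const name = await input("What is your name? ");
//   console.log(`Hello, ${name}!`);
|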
#!/usr/bin/env bash
set -e
export PROCESS_LOCK_ID="$1"
export PROCESS_EXIT_CODE=0
if [ -z "$PROCESS_LOCK_ID" ]; then
echo "You must pass the unique lock ID for this job!"
exit 100
fi
# ------------------------------------------------------------------------------
ci_run() {
local FUNC="PROCESS_$1"
if [ "$(type -t "$FUNC")" == "function" ]; then
${FUNC}
else
echo "==> WARNING: Unable to run the \"$FUNC\" since it doesn't exist!"
fi
}
ci_hook() {
# shellcheck disable=SC2064
# https://github.com/koalaman/shellcheck/wiki/SC2064
trap "$2" EXIT
ci_run "$1"
}
ci_lock() {
local NEW_EXIT_CODE="$1"
# An exit code of a previous stage is greater than zero (an
# error occurred), but the next stage has passed successfully.
if [[ "$PROCESS_EXIT_CODE" -gt 0 && "$NEW_EXIT_CODE" -eq 0 ]]; then
# E.g.: "post_deploy" failed but "server_cleaner" succeeded.
echo "==> WARNING: An attempt to downgrade the \"$PROCESS_EXIT_CODE\" exit code to zero has been refused."
else
PROCESS_EXIT_CODE="$NEW_EXIT_CODE"
echo "$PROCESS_EXIT_CODE" > "$PROCESS_LOCK_ID"
fi
}
post() {
ci_lock $?
bash -c "ci_hook ${FUNCNAME[0]} clean"
}
clean() {
ci_lock $?
bash -c "ci_hook ${FUNCNAME[0]} finish"
}
finish() {
ci_lock $?
ci_run finish
}
# ------------------------------------------------------------------------------
export -f ci_run ci_hook ci_lock post clean finish
ci_hook pre post
ci_run main
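# A hedged usage sketch (assumption: a wrapping script defines and exports the
# PROCESS_* stage functions before this file runs them, so the bash -c hooks can see them):
#   PROCESS_pre()    { echo "preparing"; }
#   PROCESS_main()   { echo "building"; }
#   PROCESS_finish() { echo "cleaning up"; }
#   export -f PROCESS_pre PROCESS_main PROCESS_finish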
|
// Copyright (c) 2018-present Baidu, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include <fstream>
#include <atomic>
#include <boost/lexical_cast.hpp>
#ifdef BAIDU_INTERNAL
#include <base/iobuf.h>
#include <base/containers/bounded_queue.h>
#include <base/time.h>
#include <raft/raft.h>
#include <raft/util.h>
#include <raft/storage.h>
#include <raft/snapshot_throttle.h>
#else
#include <butil/iobuf.h>
#include <butil/containers/bounded_queue.h>
#include <butil/time.h>
#include <braft/raft.h>
#include <braft/util.h>
#include <braft/storage.h>
#include <braft/snapshot_throttle.h>
#endif
#include "common.h"
#include "schema_factory.h"
#include "table_key.h"
#include "mut_table_key.h"
#include "rocks_wrapper.h"
#include "split_compaction_filter.h"
#include "proto/common.pb.h"
#include "proto/meta.interface.pb.h"
#include "proto/store.interface.pb.h"
#include "reverse_index.h"
#include "transaction_pool.h"
//#include "region_resource.h"
#include "runtime_state.h"
#include "runtime_state_pool.h"
#include "rapidjson/document.h"
#include "rocksdb_file_system_adaptor.h"
#include "region_control.h"
#include "meta_writer.h"
#include "rpc_sender.h"
#include "exec_node.h"
#include "concurrency.h"
#include "backup.h"
#ifdef BAIDU_INTERNAL
#else
// Open-source build: remove once the raft learner is open-sourced.
#include <braft/raft.h>
namespace braft {
class Learner {
public:
Learner(const GroupId& group_id, const PeerId& peer_id) {
}
int init(const NodeOptions& options) {
return 0;
}
void shutdown(Closure* done) {
}
void join() {
}
void snapshot(Closure* done) {
}
void get_status(NodeStatus* status) {
}
};
}
#endif
using google::protobuf::Message;
using google::protobuf::RepeatedPtrField;
namespace baikaldb {
DECLARE_int64(disable_write_wait_timeout_us);
DECLARE_int32(prepare_slow_down_wait);
static std::atomic<int64_t> ttl_remove_rows = { 0 }; // count of rows removed by TTL
static const int32_t RECV_QUEUE_SIZE = 128;
struct StatisticsInfo {
int64_t time_cost_sum;
int64_t end_time_us;
};
enum BinlogType {
PREWRITE_BINLOG,
COMMIT_BINLOG,
ROLLBACK_BINLOG,
FAKE_BINLOG
};
inline const char* binlog_type_name(const BinlogType type) {
if (type == PREWRITE_BINLOG) {
return "PREWRITE_BINLOG";
} else if (type == COMMIT_BINLOG) {
return "COMMIT_BINLOG";
} else if (type == ROLLBACK_BINLOG) {
return "ROLLBACK_BINLOG";
} else {
return "FAKE_BINLOG";
}
}
struct BinlogDesc {
int64_t primary_region_id = 0;
int64_t txn_id;
BinlogType binlog_type;
TimeCost time;
};
struct ApproximateInfo {
int64_t table_lines = 0;
uint64_t region_size = 0;
TimeCost time_cost;
    // Size at the last split; if no compaction runs after the split, the new size will not change.
    // TODO: persist this? After a restart, compaction can be done if the old and new sizes are close.
uint64_t last_version_region_size = 0;
uint64_t last_version_table_lines = 0;
TimeCost last_version_time_cost;
};
class Region;
class ScopeProcStatus {
public:
ScopeProcStatus(Region* region) : _region(region) {}
~ScopeProcStatus();
void reset() {
_region = NULL;
}
private:
Region* _region;
};
class ScopeMergeStatus {
public:
ScopeMergeStatus(Region* region) : _region(region) {}
~ScopeMergeStatus();
void reset() {
_region = NULL;
}
private:
Region* _region;
};
class TransactionPool;
typedef std::shared_ptr<Region> SmartRegion;
class Region : public braft::StateMachine, public std::enable_shared_from_this<Region> {
friend class RegionControl;
friend class Backup;
public:
static const uint8_t PRIMARY_INDEX_FLAG;
static const uint8_t SECOND_INDEX_FLAG;
virtual ~Region() {
shutdown();
join();
for (auto& pair : _reverse_index_map) {
delete pair.second;
}
}
void wait_async_apply_log_queue_empty() {
BthreadCond cond;
cond.increase();
_async_apply_log_queue.run([&cond]() {
cond.decrease_signal();
});
cond.wait();
}
void shutdown() {
if (get_version() == 0) {
wait_async_apply_log_queue_empty();
_async_apply_param.stop_adjust_stall();
}
if (_need_decrease) {
_need_decrease = false;
Concurrency::get_instance()->recieve_add_peer_concurrency.decrease_broadcast();
}
bool expected_status = false;
if (_shutdown.compare_exchange_strong(expected_status, true)) {
is_learner() ? _learner->shutdown(NULL) : _node.shutdown(NULL);
_init_success = false;
DB_WARNING("raft node was shutdown, region_id: %ld", _region_id);
}
}
void join() {
is_learner() ? _learner->join() : _node.join();
DB_WARNING("raft node join completely, region_id: %ld", _region_id);
_real_writing_cond.wait();
_disable_write_cond.wait();
_multi_thread_cond.wait();
DB_WARNING("_multi_thread_cond wait success, region_id: %ld", _region_id);
_txn_pool.close();
}
void get_node_status(braft::NodeStatus* status) {
is_learner() ? _learner->get_status(status) : _node.get_status(status);
}
Region(RocksWrapper* rocksdb,
SchemaFactory* factory,
const std::string& address,
const braft::GroupId& groupId,
const braft::PeerId& peerId,
const pb::RegionInfo& region_info,
int64_t region_id,
bool is_learner = false) :
_rocksdb(rocksdb),
_factory(factory),
_address(address),
_region_info(region_info),
_region_id(region_id),
_node(groupId, peerId),
_is_leader(false),
_shutdown(false),
_num_table_lines(0),
_num_delete_lines(0),
_region_control(this, region_id),
_snapshot_adaptor(new RocksdbFileSystemAdaptor(region_id)), _is_learner(is_learner),
_not_leader_alarm(region_id, peerId) {
        // create-table and add-peer requests start in the IDLE state; split requests start in DOING
_region_control.store_status(_region_info.status());
_version = _region_info.version();
_is_global_index = _region_info.has_main_table_id() &&
_region_info.main_table_id() != 0 &&
_region_info.table_id() != _region_info.main_table_id();
_global_index_id = _region_info.table_id();
_table_id = _is_global_index ? _region_info.main_table_id() : _region_info.table_id();
if (_region_info.has_is_binlog_region()) {
_is_binlog_region = _region_info.is_binlog_region();
}
if (_is_learner) {
_learner.reset(new braft::Learner(groupId, peerId));
}
}
int init(bool new_region, int32_t snapshot_times);
void wait_table_info() {
while (!SchemaFactory::get_instance()->exist_tableid(get_table_id())) {
DB_WARNING("region_id: %ld wait for table_info: %ld", _region_id, get_table_id());
bthread_usleep(1000 * 1000);
}
}
void raft_control(google::protobuf::RpcController* controller,
const pb::RaftControlRequest* request,
pb::RaftControlResponse* response,
google::protobuf::Closure* done) {
_region_control.raft_control(controller, request, response, done);
};
void async_apply_log_entry(google::protobuf::RpcController* controller,
const pb::BatchStoreReq* request,
pb::BatchStoreRes* response,
google::protobuf::Closure* done);
void query(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void query_binlog(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void dml(const pb::StoreReq& request,
pb::StoreRes& response,
int64_t applied_index,
int64_t term, bool need_txn_limit);
void dml_2pc(const pb::StoreReq& request,
pb::OpType op_type,
const pb::Plan& plan,
const RepeatedPtrField<pb::TupleDescriptor>& tuples,
pb::StoreRes& response,
int64_t applied_index,
int64_t term,
int seq_id, bool need_txn_limit);
void dml_1pc(const pb::StoreReq& request,
pb::OpType op_type,
const pb::Plan& plan,
const RepeatedPtrField<pb::TupleDescriptor>& tuples,
pb::StoreRes& response,
int64_t applied_index,
int64_t term,
braft::Closure* done);
int select(const pb::StoreReq& request, pb::StoreRes& response);
int select(const pb::StoreReq& request,
const pb::Plan& plan,
const RepeatedPtrField<pb::TupleDescriptor>& tuples,
pb::StoreRes& response);
int select_normal(RuntimeState& state, ExecNode* root, pb::StoreRes& response);
int select_sample(RuntimeState& state, ExecNode* root, const pb::AnalyzeInfo& analyze_info, pb::StoreRes& response);
void do_apply(int64_t term, int64_t index, const pb::StoreReq& request, braft::Closure* done);
virtual void on_apply(braft::Iterator& iter);
virtual void on_shutdown();
virtual void on_leader_start(int64_t term);
virtual void on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done);
virtual int on_snapshot_load(braft::SnapshotReader* reader);
virtual void on_leader_stop();
virtual void on_leader_stop(const butil::Status& status);
virtual void on_error(const ::braft::Error& e);
virtual void on_configuration_committed(const ::braft::Configuration& conf);
virtual void on_configuration_committed(const ::braft::Configuration& conf, int64_t index);
void snapshot(braft::Closure* done);
void on_snapshot_load_for_restart(braft::SnapshotReader* reader,
std::map<int64_t, std::string>& prepared_log_entrys);
void construct_heart_beat_request(pb::StoreHeartBeatRequest& request, bool need_peer_balance);
void construct_peers_status(pb::LeaderHeartBeat* leader_heart);
void set_can_add_peer();
    // The leader handles the add_peer request parsed from the metaServer heartbeat packet.
void add_peer(const pb::AddPeer& add_peer, SmartRegion region, ExecutionQueue& queue) {
_region_control.add_peer(add_peer, region, queue);
}
RegionControl& get_region_control() {
return _region_control;
}
void add_peer(const pb::AddPeer* request,
pb::StoreRes* response,
google::protobuf::Closure* done) {
_region_control.add_peer(request, response, done);
}
void do_snapshot() {
_region_control.sync_do_snapshot();
}
int transfer_leader(const pb::TransLeaderRequest& trans_leader_request,
SmartRegion region, ExecutionQueue& queue) {
return _region_control.transfer_leader(trans_leader_request, region, queue);
}
void reset_region_status () {
_region_control.reset_region_status();
}
void reset_snapshot_status();
pb::RegionStatus get_status() const {
return _region_control.get_status();
}
//int clear_data();
void compact_data_in_queue();
int ingest_snapshot_sst(const std::string& dir);
int ingest_sst(const std::string& data_sst_file, const std::string& meta_sst_file);
// other thread
void reverse_merge();
// other thread
void ttl_remove_expired_data();
// dump the the tuples in this region in format {{k1:v1},{k2:v2},{k3,v3}...}
// used for debug
std::string dump_hex();
    // Methods invoked from on_apply:
void start_split(braft::Closure* done, int64_t applied_index, int64_t term);
void start_split_for_tail(braft::Closure* done, int64_t applied_index, int64_t term);
void validate_and_add_version(const pb::StoreReq& request, braft::Closure* done, int64_t applied_index, int64_t term);
void add_version_for_split_region(const pb::StoreReq& request, braft::Closure* done, int64_t applied_index, int64_t term);
void apply_txn_request(const pb::StoreReq& request, braft::Closure* done, int64_t index, int64_t term);
void adjustkey_and_add_version(const pb::StoreReq& request,
braft::Closure* done,
int64_t applied_index,
int64_t term);
void exec_update_primary_timestamp(const pb::StoreReq& request,
braft::Closure* done, int64_t applied_index, int64_t term);
void adjustkey_and_add_version_query(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
    // Start the merge operation.
void start_process_merge(const pb::RegionMergeResponse& merge_response);
    // Start the split operation.
    // Step 1: through the raft state machine, create an iterator and record the current index; logs after it must no longer be deleted.
void start_process_split(const pb::RegionSplitResponse& split_response,
bool tail_split,
const std::string& split_key,
int64_t key_term);
void get_split_key_for_tail_split();
void adjust_num_table_lines();
    // Split step 2: send the iterator data.
void write_local_rocksdb_for_split();
int replay_txn_for_recovery(
const std::unordered_map<uint64_t, pb::TransactionInfo>& prepared_txn);
int replay_applied_txn_for_recovery(
int64_t region_id,
const std::string& instance,
std::string start_key,
const std::unordered_map<uint64_t, pb::TransactionInfo>& applied_txn);
void send_log_entry_to_new_region_for_split();
int split_region_add_peer(std::string& new_region_leader);
void split_remove_new_region_peers() {
start_thread_to_remove_region(_split_param.new_region_id, _split_param.instance);
for (auto& peer : _split_param.add_peer_instances) {
start_thread_to_remove_region(_split_param.new_region_id, peer);
}
}
    // Split step 3: notify the newly split region that the split is complete, bump old_region's version, and update its end_key.
void send_complete_to_new_region_for_split();
    // Split step 4: finish the split.
void complete_split();
void transfer_leader_after_split();
    // All log entries since the split started form the incremental part of the split.
    // Returns 1 if there is more data, 0 when the end has been reached.
int get_log_entry_for_split(const int64_t start_index,
const int64_t expected_term,
std::vector<pb::BatchStoreReq>& requests,
            std::vector<butil::IOBuf>& req_datas, // data carried in the cntl attachment
int64_t& split_end_index);
int get_split_key(std::string& split_key, int64_t& split_key_term);
int64_t get_region_id() const {
return _region_id;
}
void update_average_cost(int64_t request_time_cost);
void reset_split_status() {
if (_split_param.snapshot != nullptr) {
_rocksdb->get_db()->ReleaseSnapshot(_split_param.snapshot);
}
_split_param.reset_status();
}
void real_writing_decrease() {
_real_writing_cond.decrease_signal();
}
void reset_allow_write() {
_disable_write_cond.decrease_broadcast();
}
void set_disable_write() {
_disable_write_cond.increase();
}
int32_t num_prepared() {
return _txn_pool.num_prepared();
}
int32_t num_began() {
return _txn_pool.num_began();
}
int64_t get_split_index() {
return _split_param.split_start_index;
}
void set_used_size(int64_t used_size) {
std::lock_guard<std::mutex> lock(_region_lock);
_region_info.set_used_size(used_size);
}
std::string get_start_key() {
std::lock_guard<std::mutex> lock(_region_lock);
return _region_info.start_key();
}
std::string get_end_key() {
std::lock_guard<std::mutex> lock(_region_lock);
return _region_info.end_key();
}
int64_t get_partition_num() {
std::lock_guard<std::mutex> lock(_region_lock);
if (_region_info.has_partition_num()) {
return _region_info.partition_num();
}
return 1;
}
rocksdb::Range get_rocksdb_range() {
return rocksdb::Range(_rocksdb_start, _rocksdb_end);
}
bool is_merged() {
std::lock_guard<std::mutex> lock(_region_lock);
if (!_region_info.start_key().empty()) {
return _region_info.start_key() == _region_info.end_key();
}
return false;
}
int64_t get_log_index() const {
return _applied_index;
}
int64_t get_data_index() const {
return _data_index;
}
int64_t get_log_index_lastcycle() const {
return _applied_index_lastcycle;
}
void reset_log_index_lastcycle() {
_applied_index_lastcycle = _applied_index;
_lastcycle_time_cost.reset();
}
int64_t get_lastcycle_timecost() const {
return _lastcycle_time_cost.get_time();
}
int64_t get_last_split_time_cost() const {
return _last_split_time_cost.get_time();
}
rocksdb::ColumnFamilyHandle* get_data_cf() const {
return _data_cf;
}
butil::EndPoint get_leader() {
if (is_learner()) {
butil::EndPoint leader;
butil::str2endpoint(region_info().leader().c_str(), &leader);
return leader;
}
return _node.leader_id().addr;
}
int64_t get_used_size() {
std::lock_guard<std::mutex> lock(_region_lock);
return _region_info.used_size();
}
int64_t get_table_id() {
return _table_id;
}
int64_t get_global_index_id() {
return _global_index_id;
}
bool is_leader() {
return (_is_leader.load());
}
void leader_start(int64_t term) {
_is_leader.store(true);
_not_leader_alarm.reset();
_expected_term = term;
DB_WARNING("leader real start, region_id: %ld term: %ld", _region_id, term);
}
int64_t get_version() {
return _version;
}
int64_t get_dml_latency() {
return _dml_time_cost.latency();
}
pb::RegionInfo& region_info() {
return _region_info;
}
std::shared_ptr<RegionResource> get_resource() {
BAIDU_SCOPED_LOCK(_ptr_mutex);
return _resource;
}
bool check_region_legal_complete();
bool compare_and_set_illegal() {
std::unique_lock<std::mutex> lock(_legal_mutex);
std::lock_guard<std::mutex> lock_region(_region_lock);
if (_region_info.version() <= 0) {
_legal_region = false;
return true;
}
return false;
}
bool compare_and_set_legal_for_split() {
std::unique_lock<std::mutex> lock(_legal_mutex);
if (_legal_region) {
std::lock_guard<std::mutex> lock_region(_region_lock);
_region_info.set_version(1);
DB_WARNING("compare and set split verison to 1, region_id: %ld", _region_id);
return true;
}
return false;
}
bool compare_and_set_legal() {
std::unique_lock<std::mutex> lock(_legal_mutex);
if (_legal_region) {
return true;
}
return false;
}
int64_t get_num_table_lines() {
return _num_table_lines.load();
}
bool is_tail() {
std::lock_guard<std::mutex> lock(_region_lock);
return (_region_info.end_key().empty());
}
bool is_head() {
std::lock_guard<std::mutex> lock(_region_lock);
return (_region_info.start_key().empty());
}
bool empty() {
std::lock_guard<std::mutex> lock(_region_lock);
return (_region_info.start_key() == _region_info.end_key()
&& !_region_info.end_key().empty()
&& !_region_info.start_key().empty());
}
int64_t get_timecost() {
return _time_cost.get_time();
}
void reset_timecost() {
return _time_cost.reset();
}
void set_num_table_lines(int64_t table_line) {
MetaWriter::get_instance()->update_num_table_lines(_region_id, table_line);
_num_table_lines.store(table_line);
DB_WARNING("region_id: %ld, table_line:%ld", _region_id, _num_table_lines.load());
}
bool removed() const {
return _removed;
}
bool is_binlog_region() const { return _is_binlog_region; }
void set_removed(bool removed) {
_removed = removed;
_removed_time_cost.reset();
}
int64_t removed_time_cost() const {
return _removed_time_cost.get_time();
}
int64_t get_split_wait_time() {
int64_t wait_time = FLAGS_disable_write_wait_timeout_us;
if (FLAGS_disable_write_wait_timeout_us < _split_param.split_slow_down_cost * 10) {
wait_time = _split_param.split_slow_down_cost * 10;
}
if (wait_time > 30 * 1000 * 1000LL) {
//DB_WARNING("split wait time exceed 30s, region_id: %ld", _region_id);
wait_time = 30 * 1000 * 1000LL;
}
return wait_time;
}
void exec_in_txn_query(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void exec_out_txn_query(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void exec_txn_query_primary_region(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void exec_txn_complete(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void exec_txn_query_state(google::protobuf::RpcController* controller,
const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
void exec_dml_out_txn_query(const pb::StoreReq* request,
pb::StoreRes* response,
google::protobuf::Closure* done);
int execute_cached_cmd(const pb::StoreReq& request, pb::StoreRes& response,
uint64_t txn_id,
SmartTransaction& txn,
int64_t applied_index,
int64_t term,
uint64_t log_id);
void clear_transactions() {
if (_shutdown || !_init_success || get_version() <= 0) {
return;
}
_multi_thread_cond.increase();
_txn_pool.clear_transactions(this);
_multi_thread_cond.decrease_signal();
}
void update_ttl_info() {
if (_shutdown || !_init_success || get_version() <= 0) {
return;
}
TTLInfo ttl_info = _factory->get_ttl_duration(get_table_id());
if (ttl_info.ttl_duration_s > 0 && ttl_info.online_ttl_expire_time_us > 0) {
// online TTL
if (ttl_info.online_ttl_expire_time_us != _online_ttl_base_expire_time_us) {
_online_ttl_base_expire_time_us = ttl_info.online_ttl_expire_time_us;
_use_ttl = true;
_txn_pool.update_ttl_info(_use_ttl, _online_ttl_base_expire_time_us);
DB_WARNING("table_id: %ld, region_id: %ld, ttl_duration_s: %ld, online_ttl_expire_time_us: %ld, %s",
get_table_id(), _region_id, ttl_info.ttl_duration_s,
ttl_info.online_ttl_expire_time_us, timestamp_to_str(ttl_info.online_ttl_expire_time_us/1000000).c_str());
}
}
}
void clear_orphan_transactions(braft::Closure* done, int64_t applied_index, int64_t term);
void apply_clear_transactions_log();
TransactionPool& get_txn_pool() {
return _txn_pool;
}
void rollback_txn_before(int64_t timeout) {
return _txn_pool.rollback_txn_before(timeout);
}
void start_thread_to_remove_region(int64_t drop_region_id, std::string instance_address) {
Bthread bth(&BTHREAD_ATTR_SMALL);
std::function<void()> remove_region_function =
[this, drop_region_id, instance_address]() {
_multi_thread_cond.increase();
RpcSender::send_remove_region_method(drop_region_id, instance_address);
_multi_thread_cond.decrease_signal();
};
bth.run(remove_region_function);
}
void set_restart(bool restart) {
_restart = restart;
}
    // replica_num can now be modified, so taking replica_num from region_info is no longer accurate.
//bool peers_stable() {
// std::vector<braft::PeerId> peers;
// return _node.list_peers(&peers).ok() && peers.size() >= (size_t)_region_info.replica_num();
//}
void copy_region(pb::RegionInfo* region_info) {
std::lock_guard<std::mutex> lock(_region_lock);
region_info->CopyFrom(_region_info);
}
void kv_apply_raft(RuntimeState* state, SmartTransaction txn);
void set_separate_switch(bool is_separate) {
_storage_compute_separate = is_separate;
}
void lock_commit_meta_mutex() {
_commit_meta_mutex.lock();
}
void unlock_commit_meta_mutex() {
_commit_meta_mutex.unlock();
}
void put_commit_ts(const uint64_t txn_id, int64_t commit_ts) {
std::unique_lock<bthread::Mutex> lck(_commit_ts_map_lock);
_commit_ts_map[txn_id] = commit_ts;
if (_commit_ts_map.size() > 100000) {
            // one-day threshold
int64_t threshold_value = commit_ts - 86400000LL;
auto iter = _commit_ts_map.begin();
while (iter != _commit_ts_map.end()) {
if (iter->second < threshold_value) {
iter = _commit_ts_map.erase(iter);
} else {
++iter;
}
}
}
}
int64_t get_commit_ts(uint64_t txn_id, int64_t start_ts) {
std::unique_lock<bthread::Mutex> lck(_commit_ts_map_lock);
if (_commit_ts_map.count(txn_id) == 0) {
return -1;
}
return _commit_ts_map[txn_id];
}
void remove_local_index_data();
void delete_local_rocksdb_for_ddl(int64_t table_id, int64_t index_id);
int add_reverse_index(int64_t table_id, int64_t index_id);
void process_download_sst(brpc::Controller* controller,
std::vector<std::string>& req_vec, SstBackupType type);
void process_upload_sst(brpc::Controller* controller, bool is_ingest);
void process_download_sst_streaming(brpc::Controller* controller,
const pb::BackupRequest* request,
pb::BackupResponse* response);
void process_upload_sst_streaming(brpc::Controller* controller, bool is_ingest,
const pb::BackupRequest* request,
pb::BackupResponse* response);
std::shared_ptr<Region> get_ptr() {
return shared_from_this();
}
uint64_t snapshot_data_size() const {
return _snapshot_data_size;
}
void set_snapshot_data_size(size_t size) {
_snapshot_data_size = size;
}
uint64_t snapshot_meta_size() const {
return _snapshot_meta_size;
}
void set_snapshot_meta_size(size_t size) {
_snapshot_meta_size = size;
}
bool is_addpeer() const {
return _region_info.can_add_peer();
}
uint64_t get_approx_size() {
        // For a while after a split, re-fetch when more than 10 minutes have passed or the line count differs by more than 10%.
if (_approx_info.time_cost.get_time() > 10 * 60 * 1000 * 1000LL &&
_approx_info.last_version_time_cost.get_time() < 2 * 60 * 60 * 1000 * 1000LL) {
return UINT64_MAX;
} else {
int64_t diff_lines = abs(_num_table_lines.load() - _approx_info.table_lines);
if (diff_lines * 10 > _num_table_lines.load()) {
// adjust_num_table_lines();
return UINT64_MAX;
}
}
return _approx_info.region_size;
}
void set_approx_size(uint64_t region_size) {
_approx_info.time_cost.reset();
_approx_info.table_lines = _num_table_lines.load();
_approx_info.region_size = region_size;
}
bool can_use_approximate_split();
int binlog_scan_when_restart();
void binlog_timeout_check(int64_t rollback_ts);
void binlog_fake(int64_t ts, BthreadCond& cond);
pb::PeerStatus region_status() const {
return _region_status;
}
int64_t snapshot_index() const {
return _snapshot_index;
}
bool is_learner() const {
return _is_learner;
}
bool is_disable_write() {
return _disable_write_cond.count() > 0;
}
bool is_dml_op_type(const pb::OpType& op_type) {
if (op_type == pb::OP_INSERT
|| op_type == pb::OP_DELETE
|| op_type == pb::OP_UPDATE
|| op_type == pb::OP_SELECT_FOR_UPDATE) {
return true;
}
return false;
}
private:
struct SplitParam {
int64_t split_start_index = INT_FAST64_MAX;
int64_t split_end_index = 0;
int64_t split_term = 0;
int64_t new_region_id = 0;
        int64_t reduce_num_lines = 0; // inexact; TODO: needs an exact count
bool split_slow_down = false;
int64_t split_slow_down_cost = 0;
int err_code = 0;
std::string split_key;
//std::string old_end_key;
std::string instance;
std::vector<std::string> add_peer_instances;
TimeCost total_cost;
TimeCost no_write_time_cost;
int64_t new_region_cost;
TimeCost op_start_split;
int64_t op_start_split_cost;
TimeCost op_start_split_for_tail;
int64_t op_start_split_for_tail_cost;
TimeCost op_snapshot;
TimeCost add_peer_cost;
int64_t op_snapshot_cost;
int64_t write_sst_cost;
int64_t send_first_log_entry_cost;
int64_t write_wait_cost;
int64_t send_second_log_entry_cost;
int64_t send_complete_to_new_region_cost;
TimeCost op_add_version;
int64_t op_add_version_cost;
const rocksdb::Snapshot* snapshot = nullptr;
bool tail_split = false;
std::unordered_map<uint64_t, pb::TransactionInfo> applied_txn;
void reset_status() {
split_start_index = INT_FAST64_MAX;
split_end_index = 0;
split_term = 0;
new_region_id = 0;
split_slow_down = false;
split_slow_down_cost = 0;
err_code = 0;
split_key = "";
instance = "";
reduce_num_lines = 0;
tail_split = false;
snapshot = nullptr;
applied_txn.clear();
add_peer_instances.clear();
};
};
struct BinlogParam {
        std::map<int64_t, BinlogDesc> ts_binlog_map; // caches prewrite binlog metadata so the matching entry can be found quickly when the commit binlog arrives
        int64_t min_ts_in_map = -1; // smallest ts in ts_binlog_map, updated after each scan round
        int64_t max_ts_in_map = -1; // largest ts in ts_binlog_map; a binlog with a larger ts is written straight to rocksdb without updating the map, which a periodic thread refreshes later
        int64_t check_point_ts = -1; // checkpoint: every binlog before it has committed; after a restart, scanning resumes from here
        int64_t oldest_ts = -1; // smallest ts in rocksdb; if a peer of the region migrates without its binlog data, oldest_ts is set to the current ts
        std::map<int64_t, bool> timeout_start_ts_done; // marks start_ts values already back-checked after a timeout; only used to avoid alarms from duplicate commits, not for strict consistency
};
//binlog function
void recover_binlog();
void read_binlog(const pb::StoreReq* request, pb::StoreRes* response);
void apply_binlog(const pb::StoreReq& request, braft::Closure* done);
int write_binlog_record(SmartRecord record);
int write_binlog_value(const std::map<std::string, ExprValue>& field_value_map);
int64_t binlog_get_int64_val(const std::string& name, const std::map<std::string, ExprValue>& field_value_map);
std::string binlog_get_str_val(const std::string& name, const std::map<std::string, ExprValue>& field_value_map);
void binlog_get_scan_fields(std::map<int32_t, FieldInfo*>& field_ids, std::vector<int32_t>& field_slot);
void binlog_get_field_values(std::map<std::string, ExprValue>& field_value_map, SmartRecord record);
int binlog_reset_on_snapshot_load_restart();
int binlog_reset_on_snapshot_load();
void binlog_update_map_when_scan(const std::map<std::string, ExprValue>& field_value_map);
int binlog_update_map_when_apply(const std::map<std::string, ExprValue>& field_value_map, const std::string& remote_side);
int binlog_update_check_point();
int get_primary_region_info(int64_t primary_region_id, pb::RegionInfo& region_info);
void binlog_query_primary_region(const int64_t& start_ts, const int64_t& txn_id, pb::RegionInfo& region_info, int64_t rollback_ts);
void binlog_fill_exprvalue(const pb::BinlogDesc& binlog_desc, pb::OpType op_type, std::map<std::string, ExprValue>& field_value_map);
//binlog end
void apply_kv_in_txn(const pb::StoreReq& request, braft::Closure* done,
int64_t index, int64_t term);
void apply_kv_out_txn(const pb::StoreReq& request, braft::Closure* done,
int64_t index, int64_t term);
void apply_kv_split(const pb::StoreReq& request, braft::Closure* done,
int64_t index, int64_t term);
bool validate_version(const pb::StoreReq* request, pb::StoreRes* response);
void print_log_entry(const int64_t start_index, const int64_t end_index);
void set_region(const pb::RegionInfo& region_info) {
std::lock_guard<std::mutex> lock(_region_lock);
_region_info.CopyFrom(region_info);
_version = _region_info.version();
}
void set_region_with_update_range(const pb::RegionInfo& region_info) {
std::lock_guard<std::mutex> lock(_region_lock);
_region_info.CopyFrom(region_info);
_version = _region_info.version();
// region_info carries the updated range; swap in a new resource
std::shared_ptr<RegionResource> new_resource(new RegionResource);
*new_resource = *_resource;
new_resource->region_info = region_info;
{
BAIDU_SCOPED_LOCK(_ptr_mutex);
_resource = new_resource;
}
//drop redundant data at compaction time
if (_is_binlog_region) {
//binlog regions set start key and end key to empty so the filter does not delete their data
SplitCompactionFilter::get_instance()->set_filter_region_info(
_region_id, "", false, 0);
} else {
SplitCompactionFilter::get_instance()->set_filter_region_info(
_region_id, region_info.end_key(),
_use_ttl, _online_ttl_base_expire_time_us);
}
DB_WARNING("region_id: %ld, start_key: %s, end_key: %s", _region_id,
rocksdb::Slice(region_info.start_key()).ToString(true).c_str(),
rocksdb::Slice(region_info.end_key()).ToString(true).c_str());
}
// if seek_table_lines != nullptr, seek all sst for seek_table_lines
bool has_sst_data(int64_t* seek_table_lines);
bool ingest_has_sst_data();
bool wait_rocksdb_normal(int64_t timeout = -1) {
TimeCost cost;
TimeCost total_cost;
while (_rocksdb->is_any_stall()) {
if (timeout > 0 && total_cost.get_time() > timeout) {
return false;
}
if (cost.get_time() > 60 * 1000 * 1000) {
DB_WARNING("region_id: %ld wait for rocksdb stall", _region_id);
cost.reset();
}
reset_timecost();
bthread_usleep(1 * 1000 * 1000);
}
return true;
}
int check_learner_snapshot();
int check_follower_snapshot(const std::string& peer);
bool learner_ready_for_read() const {
return _learner_ready_for_read;
}
void update_binlog_read_max_ts(int64_t ts) {
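// lock-free monotonic max update via compare-and-swap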
int64_t max_ts = _binlog_read_max_ts.load();
while (max_ts < ts) {
if (_binlog_read_max_ts.compare_exchange_strong(max_ts, ts)) {
break;
}
max_ts = _binlog_read_max_ts.load();
}
}
private:
//Singleton
RocksWrapper* _rocksdb;
SchemaFactory* _factory;
rocksdb::ColumnFamilyHandle* _data_cf;
rocksdb::ColumnFamilyHandle* _meta_cf;
std::string _address; //ip:port
//region metainfo
pb::RegionInfo _region_info;
std::mutex _region_lock;
//after a split, cache the split-off regions' info for baikaldb
std::vector<pb::RegionInfo> _new_region_infos;
size_t _snapshot_data_size = 0;
size_t _snapshot_meta_size = 0;
pb::RegionInfo _new_region_info;
int64_t _region_id = 0;
int64_t _version = 0;
int64_t _table_id = 0; // region.main_table_id
int64_t _global_index_id = 0; //region.table_id
//after a merge this region is empty; record the target region for baikaldb (a region merges only once, so no vector is needed)
pb::RegionInfo _merge_region_info;
// needed by the reverse (inverted) index
// TODO(liguoqiang): how should this be initialized?
std::map<int64_t, ReverseIndexBase*> _reverse_index_map;
// TODO: could this be made lock-free?
BthreadCond _disable_write_cond;
BthreadCond _real_writing_cond;
SplitParam _split_param;
std::mutex _legal_mutex;
bool _legal_region = true;
TimeCost _time_cost; //time the last request was received; reset on every request
LatencyOnly _dml_time_cost;
bool _restart = false;
//compute-storage-separation switch, updated by the store's periodic task so each DML avoids hitting the schema factory
bool _storage_compute_separate = false;
bool _use_ttl = false; // updated by online TTL; only ever flips from false to true
int64_t _online_ttl_base_expire_time_us = 0; // expire time for pre-existing data; used only by tables with online TTL
bool _reverse_remove_range = false; //for split data, run the posting lists through the filter once more
//raft node
braft::Node _node;
std::atomic<bool> _is_leader;
// normally _braft_apply_index and _applied_index are equal
// only while an accelerated split sends log entries asynchronously is _braft_apply_index > _applied_index
// their difference is the number of requests queued in the executionQueue
int64_t _braft_apply_index = 0;
int64_t _applied_index = 0; //current log index
// data version; unchanged by conf_change, no_op, and other entries that do not touch data
int64_t _data_index = 0;
int64_t _expected_term = -1;
// bthread cycle: set _applied_index_lastcycle = _applied_index when _num_table_lines == 0
int64_t _applied_index_lastcycle = 0;
TimeCost _lastcycle_time_cost; //time of the periodic thread's last cycle; updated whenever _applied_index_lastcycle is updated
TimeCost _last_split_time_cost; //timestamp of the last split
ApproximateInfo _approx_info;
bool _report_peer_info = false;
std::atomic<bool> _shutdown;
bool _init_success = false;
bool _need_decrease = false; // addpeer时候从init到on_snapshot_load整体限制
bool _can_heartbeat = false;
BthreadCond _multi_thread_cond;
// region stat variables
// TODO: maintaining num_table_lines is cumbersome; consider deriving it from an estimate instead
std::atomic<int64_t> _num_table_lines; //total number of pk record in this region
std::atomic<int64_t> _num_delete_lines; //total number of delete rows after last compact
int64_t _snapshot_num_table_lines = 0; //last snapshot number
TimeCost _snapshot_time_cost;
int64_t _snapshot_index = 0; //last snapshot log index
bool _removed = false;
TimeCost _removed_time_cost;
TransactionPool _txn_pool;
RuntimeStatePool _state_pool;
// shared_ptr is not thread safe when assign
std::mutex _ptr_mutex;
std::shared_ptr<RegionResource> _resource;
RegionControl _region_control;
MetaWriter* _meta_writer = nullptr;
bthread::Mutex _commit_meta_mutex;
scoped_refptr<braft::FileSystemAdaptor> _snapshot_adaptor = nullptr;
bool _is_global_index = false; //whether this region belongs to a global index
std::mutex _reverse_index_map_lock;
std::mutex _backup_lock;
Backup _backup;
//binlog
bool _is_binlog_region = false; //whether this is a binlog region
std::atomic<int64_t> _binlog_read_max_ts = { 0 }; // max ts read from the binlog
// txn_id:commit_ts
std::map<uint64_t, int64_t> _commit_ts_map;
bthread::Mutex _commit_ts_map_lock;
bthread::Mutex _binlog_param_mutex;
BinlogParam _binlog_param;
std::string _rocksdb_start;
std::string _rocksdb_end;
pb::PeerStatus _region_status = pb::STATUS_NORMAL;
//learner
std::unique_ptr<braft::Learner> _learner;
bool _is_learner = false;
bool _learner_ready_for_read = false;
TimeCost _learner_time;
//categorized NOT_LEADER alarms
struct NotLeaderAlarm {
enum AlarmType {
ALARM_INIT = 0,
LEADER_INVALID = 1,
LEADER_RAFT_FALL_BEHIND = 2,
LEADER_NOT_REAL_START = 3
};
NotLeaderAlarm (int64_t region_id, const braft::PeerId& node_id) :
type(ALARM_INIT), region_id(region_id), node_id(node_id) { }
void reset() {
leader_start = false;
alarm_begin_time.reset();
last_print_time.reset();
total_count = 0;
interval_count = 0;
type = ALARM_INIT;
}
void set_leader_start() { leader_start = true; }
void not_leader_alarm(const braft::PeerId& leader_id);
AlarmType type;
std::atomic<bool> leader_start = { false };
std::atomic<int> total_count = { 0 };
std::atomic<int> interval_count = { 0 };
TimeCost alarm_begin_time;
TimeCost last_print_time; // print the alarm log only at intervals
const int64_t region_id;
const braft::PeerId node_id;
};
NotLeaderAlarm _not_leader_alarm;
struct AsyncApplyParam {
std::atomic<bool> has_adjust_stall = { false };
// if an async apply fails, set this flag; the next async_apply_log rpc will return an error,
// and add_version also checks it
bool apply_log_failed = false;
void start_adjust_stall() {
if (!has_adjust_stall) {
RocksWrapper::get_instance()->begin_split_adjust_option();
has_adjust_stall = true;
}
}
void stop_adjust_stall() {
if (has_adjust_stall) {
RocksWrapper::get_instance()->stop_split_adjust_option();
has_adjust_stall = false;
}
}
};
AsyncApplyParam _async_apply_param;
ExecutionQueue _async_apply_log_queue;
};
} // end of namespace
|
import { CoordInterface } from './interfaces';
/**
* COORDINATE CLASS
* Coordinate is a basic class allowing to place elements on the grid
* The grid goes from top-left to bottom right
* Indices start at 0
*/
export default class Coord {
x: number;
y: number;
constructor(y: number, x: number) {
this.y = y;
this.x = x;
}
/**
* @returns coordinate at the top
*/
get up(): Coord {
return Coord.importCoord({ y: this.y - 1, x: this.x });
}
/**
* @returns coordinate at the bottom
*/
get down(): Coord {
return Coord.importCoord({ y: this.y + 1, x: this.x });
}
/**
* @returns coordinate at the left
*/
get left(): Coord {
return Coord.importCoord({ y: this.y, x: this.x - 1 });
}
/**
* @returns coordinate at the right
*/
get right(): Coord {
return Coord.importCoord({ y: this.y, x: this.x + 1 });
}
/**
* @returns list of adjacent cells
*/
get adjacent(): Coord[] {
return [this.up, this.right, this.down, this.left];
}
/**
* Test if the coordinate is outside of the grid
* beware: only the negative side can be checked without grid/rows values
* @returns boolean
*/
get outOfGrid(): boolean {
return this.x < 0 || this.y < 0;
}
/**
* Check if two coordinates are adjacent
* @returns boolean if cells are adjacent
*/
isAdjacent(coord: Coord): boolean {
return coord.isIncludedIn(this.adjacent);
}
/**
* Describe next coordinate in direction given
* @param angle angle direction
* @returns coordinate in direction
*/
fromAngle(directionAngle: number): Coord {
// normalize first so negative multiples of 90° (e.g. -90) also resolve
const angle = ((directionAngle % 360) + 360) % 360;
switch (angle) {
case 0:
return this.right;
case 90:
return this.up;
case 180:
return this.left;
case 270:
return this.down;
default:
throw Error(`Angle provided is not a multiple of 90°...`);
}
}
/**
* Test two coordinates for equality
* @param coord other coordinate to test for equality
* @returns boolean if equal
*/
equal(coord: Coord): boolean {
return this.x === coord.x && this.y === coord.y;
}
/**
* Test if a coordinate is included in a list of coordinates
* @param coords list of coordinates
* @returns boolean if coordinate is included in list
*/
isIncludedIn(coords: Coord[]): boolean {
return (
coords.filter((coord) => {
return this.equal(coord);
}).length > 0
);
}
/**
* Unique identifier of a coordinate in a cell
* @param cols width of grid (number of columns)
* @returns uid of cell in a grid
*/
uid(cols: number): number {
return this.y * cols + this.x;
}
/**
* SVG coordinate system: top-left point of cell
* @param cellSize Size in pixel of a cell
* @returns top-left coordinate of a cell
*/
pos(cellSize: number): CoordInterface {
const y = this.y * cellSize;
const x = this.x * cellSize;
return { y, x };
}
/**
* SVG coordinate system: center point of cell
* @param cellSize Size in pixel of a cell
* @returns center coordinate of a cell
*/
center(cellSize: number): CoordInterface {
const y = (this.y + 0.5) * cellSize;
const x = (this.x + 0.5) * cellSize;
return { y, x };
}
/**
* Output as an array of numbers
* @returns number array of coordinate
*/
get toArray(): number[] {
return [this.y, this.x];
}
/**
* Outputs a string for debug
* @returns string describing the coordinate
*/
toString(): string {
return `[Y:${this.y}, X:${this.x}]`;
}
/**
* Output to interface of primitives
* @returns interface describing coordinate
*/
exportCoord(): CoordInterface {
return {
y: this.y,
x: this.x
};
}
/**
* Create a coordinate class instance from a coordinate interface
* @param obj Coordinate interface
*/
static importCoord(json: CoordInterface): Coord {
return new Coord(json.y, json.x);
}
/**
* Create a coordinate class instance from a unique id and number of columns
* @param index unique id
* @param cols width of grid
*/
static fromId(index: number, cols: number): Coord {
const x = index % cols;
const y = Math.floor(index / cols);
return Coord.importCoord({ y, x });
}
}
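// Minimal usage sketch (illustrative only; assumes a 4-column grid):
// round-trips a cell through uid()/fromId() and checks adjacency.
export function coordUsageSketch(): void {
const c = new Coord(2, 3); // y = 2, x = 3
const id = c.uid(4); // 2 * 4 + 3 === 11
const back = Coord.fromId(id, 4); // Coord(2, 3) again
console.log(c.equal(back)); // true
console.log(c.isAdjacent(c.up)); // true
}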
|
#!/bin/bash
set -aueo pipefail
# shellcheck disable=SC1091
source .env
ENABLE_EGRESS="${ENABLE_EGRESS:-false}"
# kubectl delete namespace "$KIWIIRC_NAMESPACE" --ignore-not-found
kubectl create namespace "$KIWIIRC_NAMESPACE" || true
kubectl delete deployment kiwiirc -n "$KIWIIRC_NAMESPACE" --ignore-not-found
./scripts/create-container-registry-creds.sh
echo -e "Deploy KiwiIRC Service Account"
kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: kiwiirc
namespace: $KIWIIRC_NAMESPACE
EOF
echo -e "Deploy KiwiIRC Service"
kubectl delete service -n "$KIWIIRC_NAMESPACE" kiwiirc --ignore-not-found
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
name: kiwiirc
namespace: $KIWIIRC_NAMESPACE
labels:
app: kiwiirc
spec:
ports:
- port: 80
name: web
selector:
app: kiwiirc
EOF
echo -e "Deploy KiwiIRC Deployment"
kubectl delete deployment -n "$KIWIIRC_NAMESPACE" kiwiirc --ignore-not-found
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kiwiirc
namespace: $KIWIIRC_NAMESPACE
spec:
replicas: 1
selector:
matchLabels:
app: kiwiirc
template:
metadata:
labels:
app: kiwiirc
version: v1
spec:
serviceAccountName: kiwiirc
containers:
# Main container with APP
- name: kiwiirc
image: osmci.azurecr.io/delyan-osm-a/kiwiirc:latest
imagePullPolicy: Always
imagePullSecrets:
- name: "$CTR_REGISTRY_CREDS_NAME"
EOF
echo "Create KiwiIRC Ingress Resource"
kubectl apply -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kiwiirc
namespace: $KIWIIRC_NAMESPACE
annotations:
kubernetes.io/ingress.class: azure/application-gateway
appgw.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/acme-challenge-type: http01
spec:
tls:
- hosts:
- kiwi.mis.li
secretName: kiwiirc-cert
rules:
- host: kiwi.mis.li
http:
paths:
- path: /
backend:
serviceName: kiwiirc
servicePort: 80
EOF
kubectl get pods --no-headers -o wide --selector app=kiwiirc -n "$KIWIIRC_NAMESPACE"
kubectl get endpoints --no-headers -o wide --selector app=kiwiirc -n "$KIWIIRC_NAMESPACE"
kubectl get service -o wide -n "$KIWIIRC_NAMESPACE"
for x in $(kubectl get service -n "$KIWIIRC_NAMESPACE" --selector app=kiwiirc --no-headers | awk '{print $1}'); do
kubectl get service "$x" -n "$KIWIIRC_NAMESPACE" -o jsonpath='{.status.loadBalancer.ingress[*].ip}'
done
|
package com.futureplatforms.kirin.test.dummies;
import java.util.Collection;
import java.util.List;
import com.futureplatforms.kirin.internal.attic.ProxyGenerator;
import com.futureplatforms.kirin.internal.core.INativeContext;
import com.futureplatforms.kirin.internal.core.NativeContext.SettableFuture;
public class DummyNativeContext implements INativeContext {
private Collection<String> mMethodNames;
public String mLastModuleName;
public Object mLastNativeObject;
public void setDummyMethods(List<String> methodNames) {
mMethodNames = methodNames;
}
@Override
public Collection<String> getMethodNamesForObject(String moduleName) {
return mMethodNames;
}
@Override
public void registerNativeObject(String moduleName, Object object, ProxyGenerator proxyGenerator) {
mLastModuleName = moduleName;
mLastNativeObject = object;
}
@Override
public void unregisterNativeObject(String moduleName) {
if (moduleName == null || moduleName.equals(mLastModuleName)) {
mLastModuleName = null;
mLastNativeObject = null;
}
}
@Override
public void executeCommandFromModule(String moduleName, String methodName,
Object... args) {
// NOP
}
// Helpful for testing.
public boolean isModuleRegistered() {
return mLastModuleName != null;
}
public void reset() {
unregisterNativeObject(null);
mMethodNames = null;
}
@Override
public <T> SettableFuture<T> getFuture(Long id) {
// TODO Auto-generated method stub
return null;
}
@Override
public <T> void setReturnValue(Long id, T value) {
// TODO Auto-generated method stub
}
@Override
public Long createNewId() {
// TODO Auto-generated method stub
return null;
}
}
|
(function () {
"use strict";
angular.module('facadu')
.directive('quickEditBubble', QuickEditDirective);
/////////////////
QuickEditDirective.$inject = ['PATHS'];
function QuickEditDirective(PATHS) {
//noinspection UnnecessaryLocalVariableJS
var directive = {
replace: true,
require: ['quickEditBubble', '?^prongBubble'],
templateUrl: function () {
return PATHS.templates + 'quickEditBubble.html';
},
scope: {
newEvent: '=',
onSave: '&'
},
controller: QuickEditController,
controllerAs: 'vm',
bindToController: true,
link: QuickEditLinkFn
};
return directive;
///////////////////
//noinspection JSUnusedLocalSymbols
function QuickEditLinkFn(scope, element, attrs, controllers) {
var quickCtrl = controllers[0],
prongCtrl = controllers[1];
quickCtrl.quickEditSubmitted = quickEditSubmitted;
/////////////
function quickEditSubmitted() {
quickCtrl.onSave();
if (prongCtrl && prongCtrl.closeFn) prongCtrl.closeFn();
}
}
}
QuickEditController.$inject = ['CalendarService'];
function QuickEditController(CalendarService) {
var vm = this;
vm.saveEvent = CalendarService.createOrEditEvent;
}
})();
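// Usage sketch (illustrative; restrict is unspecified, so with Angular >= 1.3 the
// default 'EA' allows the element form shown here):
// <quick-edit-bubble new-event="vm.event" on-save="vm.save()"></quick-edit-bubble>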
|
#!/bin/sh
if [ -z "`which github-changes`" ]; then
# specify version because github-changes "is under heavy development. Things
# may break between releases" until 0.1.0
echo "First, do: [sudo] npm install -g github-changes@0.0.14"
exit 1
fi
if [ -d .git/refs/remotes/upstream ]; then
remote=upstream
else
remote=origin
fi
# Increment v2.x.y -> v2.x+1.0
# npm version minor || exit 1
# Generate changelog from pull requests
github-changes -o bda-research -r node-crawler \
--auth --verbose \
--file /tmp/CHANGELOG.md \
--only-pulls --use-commit-body \
--date-format '(YYYY/MM/DD)' \
|| exit 1
# Since the tag for the new version hasn't been pushed yet, any changes in it
# will be marked as "upcoming"
version="$(grep '"version"' package.json | cut -d'"' -f4)"
sed -i -e "s/^### upcoming/### v$version/" CHANGELOG.md
# This may fail if no changelog updates
# TODO: would this ever actually happen? handle it better?
# git add CHANGELOG.md; git commit -m 'Update changelog'
# Publish the new version to npm
# npm publish || exit 1
# Increment v2.x.0 -> v2.x.1
# For rationale, see:
# https://github.com/request/oauth-sign/issues/10#issuecomment-58917018
# npm version patch || exit 1
# Push back to the main repo
# git push $remote master --tags || exit 1
|
#!/bin/bash
catUuid=0
tagId=0
#Strip all leading and trailing spaces
function trim() {
local trimmed="$1"
# Strip leading spaces.
while [[ $trimmed == ' '* ]]; do
trimmed="${trimmed## }"
done
# Strip trailing spaces.
while [[ $trimmed == *' ' ]]; do
trimmed="${trimmed%% }"
done
echo "$trimmed"
}
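# e.g.: trimmed="$(trim "  hello  ")"  # -> "hello"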
#Insert a category and get its sequence number
#$1 category name (required)
#$2 parent category uuid (optional)
function insertNewCatAndGetSeq() {
catName=$1
parentCatUuid=$2
if [ -z "${catName}" ]
then
catUuid=${topCatUuid}
return;
fi
if [ -z "${parentCatUuid}" ]
then
catUuid=$(sqlite3 ${mWebDb} "select uuid from cat where name='${catName}'")
pid=0
else
catUuid=$(sqlite3 ${mWebDb} "select uuid from cat where name='${catName}' and pid=${parentCatUuid}")
pid=${parentCatUuid}
fi
if [ -n "${catUuid}" ]
then
echo "分类已存在,不再创建"
echo "catUuid=${catUuid}"
return
fi
currentCatSeq=$(sqlite3 ${mWebDb} "select seq from sqlite_sequence where name='cat'")
newCatSeq=$((currentCatSeq+1))
echo "newCatSeq=${newCatSeq}"
sqlite3 ${mWebDb} "update sqlite_sequence set seq=${newCatSeq} where name='cat'"
currentSortSeq=$(sqlite3 ${mWebDb} "select max(sort) from cat")
newSortSeq=$((currentSortSeq+1))
echo "newSortSeq=${newSortSeq}"
uuid=`gdate +%s%N | cut -c1-14`
sqlite3 ${mWebDb} "insert into cat(id, pid, uuid, name, docName, catType, sort, sortType, siteURL, siteSkinName, siteLastBuildDate, siteBuildPath, siteFavicon, siteLogo, siteDateFormat, sitePageSize, siteListTextNum, siteName, siteDes, siteShareCode, siteHeader, siteOther, siteMainMenuData, siteExtDef, siteExtValue, sitePostExtDef, siteEnableLaTeX, siteEnableChart) values(${newCatSeq}, ${pid}, ${uuid}, '${catName}', '', 12, ${newSortSeq}, 0, '', '', 0, '', '', '', '', 0, 0, '', '', '', '', '', '', '', '', '', 0, 0)"
echo "新分类已创建"
catUuid=$(sqlite3 ${mWebDb} "select uuid from cat where name='${catName}'" | sed -n '1p')
echo "catUuid=${catUuid}"
}
#Insert a tag and get its sequence number
function insertNewTagAndGetSeq() {
tagId=$(sqlite3 ${mWebDb} "select id from tag where name='${tagName}'")
if [ -n "${tagId}" ]
then
echo "Tag已存在,不再创建"
echo "tagId=${tagId}"
return
fi
currentTagSeq=$(sqlite3 ${mWebDb} "select seq from sqlite_sequence where name='tag'")
newTagSeq=$((currentTagSeq+1))
echo "newTagSeq=${newTagSeq}"
sqlite3 ${mWebDb} "update sqlite_sequence set seq=${newTagSeq} where name='${tagName}'"
sqlite3 ${mWebDb} "insert into tag(id, name) values(${newTagSeq}, '${tagName}')"
tagId=$(sqlite3 ${mWebDb} "select id from tag where name='${tagName}'" | sed -n '1p')
echo "tagId=${tagId}"
}
#Insert a new article
function insertNewArticle() {
aid=$1
currentSeq=$(sqlite3 ${mWebDb} "select seq from sqlite_sequence where name='article'")
newSeq=$((currentSeq+1))
echo "currentSeq=${currentSeq}"
dateAddModify=$(echo ${aid} | cut -c1-10)
sqlite3 ${mWebDb} "update sqlite_sequence set seq=${newSeq} where name='article'"
sqlite3 ${mWebDb} "insert into article(id, uuid, type, state, sort, dateAdd, dateModif, dateArt, docName, otherMedia, buildResource, postExtValue) values(${newSeq}, ${aid}, 0, 1, ${aid}, ${dateAddModify}, ${dateAddModify}, ${dateAddModify}, '', '', '', '')"
}
#Set the article's category
#$1 articleId
#$2 categoryUuid
function insertNewCatArticle() {
aid=$1
catUuid=$2
currentSeq=$(sqlite3 ${mWebDb} "select seq from sqlite_sequence where name='cat_article'")
newSeq=$((currentSeq+1))
echo "currentSeq=${currentSeq}"
sqlite3 ${mWebDb} "update sqlite_sequence set seq=${newSeq} where name='cat_article'"
sqlite3 ${mWebDb} "insert into cat_article(id, rid, aid) values(${newSeq}, ${catUuid}, ${aid})"
}
#Set the article's tag
function insertNewTagArticle() {
aid=$1
currentSeq=$(sqlite3 ${mWebDb} "select seq from sqlite_sequence where name='tag_article'")
newSeq=$((currentSeq+1))
echo "currentSeq=${currentSeq}"
sqlite3 ${mWebDb} "update sqlite_sequence set seq=${newSeq} where name='tag_article'"
sqlite3 ${mWebDb} "insert into tag_article(id, rid, aid) values(${newSeq}, ${tagId}, ${aid})"
}
#Script inputs
hexoSrcDir=/Users/chenzz/blog/source/_posts
mWebBase=/Users/chenzz/Documents/MWeb/mainlib
#mWebBase=/Users/chenzz/Downloads/mainlib
mWebDocsDir=${mWebBase}/docs
mWebDb=${mWebBase}/mainlib.db
userInputCatName="我的博客"
#tagName="blog"
##reset test data
#echo "resetting test data..."
#rm -rf ${mWebBase}
#cp -r /Users/chenzz/Documents/MWeb/mainlib ${mWebBase}
#Create the category and tag
cd ${mWebDocsDir}
echo "inserting cat and tag..."
insertNewCatAndGetSeq "${userInputCatName}"
#insertNewTagAndGetSeq
topCatUuid=${catUuid}
cd ${hexoSrcDir}
for file in *
do
dateStr=$(cat "${file}" | grep 'date:' | sed -n '1p' | awk -F ' ' '{print $2" "$3}' )
newFileName="$(gdate --date="${dateStr}" +"%s")0000"
#parse title
title=$(cat "${file}" | grep 'title:' | sed -n '1p' | awk -F '"' '{print $2}' )
if [ -z "${title}" ]
then
title=$(cat "${file}" | grep 'title:' | sed -n '1p' | awk -F ' ' '{print $2}' )
fi
echo "title is: ${title}"
#parse category
category=$(cat "${file}" | grep 'categories:' | sed -n '1p' | awk -F ' ' '{print $2}' )
if [ -z "${category}" ]
then
category=$(cat "${file}" | grep 'categories:' | sed -n '1p' | awk -F ':' '{print $2}' )
fi
category=$(trim "$category")
echo "category is: ${category}"
echo "inserting sub cat..."
insertNewCatAndGetSeq "${category}" ${topCatUuid}
# #parse tags
# tagsStr=$(cat "${file}" | grep 'tags:' | sed -n '1p' | awk -F ':' '{print $2}' )
# echo "tagsStr is: ${tagsStr}"
# if [ -n "${tagsStr}" ]
# then
# tagsStr=${tagsStr:1: -1}
# IFS=', '
# ary=($str)
# fi
# echo "tags is: ${ary[@]}"
echo "copying ${file} to ${mWebDocsDir}/${newFileName}.md"
cp "${file}" ${mWebDocsDir}/${newFileName}.md
echo "deleting 1-8 line of file..."
gsed -i "1,8d" "${mWebDocsDir}/${newFileName}.md"
echo "inserting the title..."
gsed -i "1i${title}" "${mWebDocsDir}/${newFileName}.md"
echo "inserting the toc..."
gsed -i "2i\ " "${mWebDocsDir}/${newFileName}.md"
gsed -i "3i[TOC]" "${mWebDocsDir}/${newFileName}.md"
gsed -i "4i\ " "${mWebDocsDir}/${newFileName}.md"
insertNewArticle "${newFileName}" "${title}"
insertNewCatArticle "${newFileName}" "${catUuid}"
# insertNewTagArticle ${newFileName}
done
|
<filename>spec/str2jsify.spec.js
/*global describe, it */
var expect = require('expect.js'),
str2jsify = require('../'),
outputRe = /^\s*module\.exports\s*=\s*('[^']*'|"[^"]*");?\s*$/;
describe('str2jsify', function() {
it('converts its input into a JS module', function(done) {
var transform = str2jsify.configure({filenames: 'foo.html'});
var input = 'Some arbitrary text',
output = null,
stream = transform('foo.html'),
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(timesCalled).to.be(1);
expect(output).to.contain(input);
expect(output).to.match(outputRe);
done();
});
stream.write(input);
stream.end();
});
it('ignores other extensions', function(done) {
var input = 'Some arbitrary text',
output = null,
stream = str2jsify('bleh.js'),
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(output).to.contain(input);
expect(output).to.equal(input);
expect(timesCalled).to.be(1);
done();
});
stream.write(input);
stream.end();
});
it('converts files with the correct extension', function(done) {
var transform = str2jsify.configure({extensions: '.ext'});
var input = 'Some arbitrary text',
output = null,
stream = transform('bleh.ext'),
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(output).to.match(outputRe);
expect(timesCalled).to.be(1);
done();
});
stream.write(input);
stream.end();
});
it('converts files with a matching pattern', function(done) {
var transform = str2jsify.configure({patterns: ['^foo$']});
var input = 'Some arbitrary text',
output = null,
stream = transform('foo'),
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(output).to.match(outputRe);
expect(timesCalled).to.be(1);
done();
});
stream.write(input);
stream.end();
});
it('ignores non-matching patterns', function(done) {
var transform = str2jsify.configure({patterns: ['^foo$']});
var input = 'Some arbitrary text',
output = null,
stream = transform('bar'),
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(output).to.contain(input);
expect(output).to.equal(input);
expect(timesCalled).to.be(1);
output = null;
stream = transform('FOO');
timesCalled = 0;
stream.on('data', function(data) {
output = data;
timesCalled++;
});
stream.on('end', function() {
expect(output).to.contain(input);
expect(output).to.equal(input);
expect(timesCalled).to.be(1);
done();
});
stream.write(input);
stream.end();
});
stream.write(input);
stream.end();
});
}); |
#!/bin/bash
# Check if the required arguments are provided
if [ $# -lt 4 ]; then
echo "Usage: $0 <clang-tidy-binary> <compile_commands.json> <apply-fixes> <file1> [file2 ...]"
exit 1
fi
CLANG_TIDY=$1
COMPILE_COMMANDS=$2
APPLY_FIXES=$3
shift 3
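# Example invocation (script and file names illustrative):
#   ./run_clang_tidy.sh clang-tidy build/compile_commands.json false src/main.cpp src/util.cpp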
# Iterate through the list of files and run clang-tidy
for file in "$@"; do
if [ -f "$file" ]; then
echo "Running clang-tidy on $file"
if [ "$APPLY_FIXES" = "true" ]; then
$CLANG_TIDY -p="$COMPILE_COMMANDS" -fix "$file"
else
$CLANG_TIDY -p="$COMPILE_COMMANDS" "$file"
if [ $? -ne 0 ]; then
echo "Error: Clang-tidy found issues in $file"
exit 1
fi
fi
else
echo "Error: File $file not found"
fi
done |
<gh_stars>0
package cyclops.stream.spliterator.push;
import java.util.function.Consumer;
/**
* Created by johnmcclean on 12/01/2017.
*/
public class LimitOperator<T, R> extends BaseOperator<T, T> {
long limit;
public LimitOperator(Operator<T> source,
long limit) {
super(source);
this.limit = limit;
}
@Override
public StreamSubscription subscribe(Consumer<? super T> onNext,
Consumer<? super Throwable> onError,
Runnable onComplete) {
long[] count = {0};
StreamSubscription[] sub = {null};
boolean[] completed = {false};
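// completed[] guards against calling onComplete twice (once when the limit is hit, once on upstream completion)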
sub[0] = source.subscribe(e -> {
try {
if (count[0]++ < limit) {
onNext.accept(e);
} else {
sub[0].cancel();
if (!completed[0]) {
completed[0] = true;
onComplete.run();
}
}
} catch (Throwable t) {
onError.accept(t);
}
},
onError,
() -> {
if (!completed[0]) {
completed[0] = true;
onComplete.run();
}
});
return sub[0];
}
@Override
public void subscribeAll(Consumer<? super T> onNext,
Consumer<? super Throwable> onError,
Runnable onCompleteDs) {
long[] count = {0};
StreamSubscription[] sub = {null};
boolean[] completed = {false};
source.subscribeAll(e -> {
try {
if (count[0]++ < limit) {
onNext.accept(e);
} else {
if (!completed[0]) {
completed[0] = true;
onCompleteDs.run();
}
}
} catch (Throwable t) {
onError.accept(t);
}
},
onError,
() -> {
if (!completed[0]) {
completed[0] = true;
onCompleteDs.run();
}
});
}
}
|
#! /bin/bash
export CHROMIUM_VERSION=80.0.3987.106
export PPTR_VERSION=2.1.0
npm run wd -- update --versions.chrome=${CHROMIUM_VERSION}
npm i --no-save puppeteer@${PPTR_VERSION} |
// Keep only the odd numbers from the input array.
function filterArray(arr) {
return arr.filter(num => num % 2 !== 0);
} |
document.querySelector('#about').scrollIntoView({
behavior: 'smooth'
}); |
#!/usr/bin/env bats
GIT_ROOT=$(git rev-parse --show-toplevel)
cd "$GIT_ROOT"
setup() {
$GIT_ROOT/test/docker/up.sh
source $GIT_ROOT/test/docker/.env
}
teardown() {
$GIT_ROOT/test/docker/down.sh
}
@test "Verify that vault address is set" {
[ "$VAULT_ADDR" == "http://0.0.0.0:8200" ]
}
@test "Verify that vault is unseal" {
result="$(vault status | grep "Sealed" | tr -s ' ' | cut -d " " -f2)"
[ "$result" == "false" ]
}
@test "Verify that vault initialized" {
result="$(vault status | grep "Initialized" | tr -s ' ' | cut -d " " -f2)"
[ "$result" == "true" ]
}
|
#!/bin/bash
git clone https://github.com/riscv/riscv-tests
cd riscv-tests
git submodule update --init --recursive
autoconf
./configure --prefix=$RISCV/target
make
make install
|
#include <X11/Xlib.h>
#include <SDL2/SDL.h>
#include "errors.h"
#include "display.h"
int XResetScreenSaver(Display *display) {
// https://tronche.com/gui/x/xlib/window-and-session-manager/XResetScreenSaver.html
SDL_DisableScreenSaver();
SDL_EnableScreenSaver();
return 1;
}
int XForceScreenSaver(Display *display, int mode) {
// https://tronche.com/gui/x/xlib/window-and-session-manager/XForceScreenSaver.html
// https://www.libsdl.org/tmp/docs-1.3/_s_d_l__video_8h.html#6e5293ce67509a49c1ead749fc4547d9
SET_X_SERVER_REQUEST(display, X_ForceScreenSaver);
switch (mode) {
case ScreenSaverActive:
// Activate the screen saver now
SDL_EnableScreenSaver();
break;
case ScreenSaverReset:
XResetScreenSaver(display);
break;
default:
handleError(0, display, None, 0, BadValue, 0);
return 0;
}
return 1;
}
|
def squared_list(input_list):
output_list = [x ** 2 for x in input_list]
return output_list
input_list = [1, 2, 3, 4]
output_list = squared_list(input_list)
print(output_list) |
import React from "react";
import renderer from "react-test-renderer";
import { Button } from "../Button";
import { Icon } from "../../../components/icons/icons";
describe("Button Component", () => {
it("Button with text snapshot", () => {
const tree = renderer.create(<Button name={"test"}>Hello</Button>).toJSON();
expect(tree).toMatchSnapshot();
});
it("Button circle with icon snapshot", () => {
const tree = renderer
.create(
<Button name={"test"} type={"circle"}>
<Icon.Exchange />
</Button>,
)
.toJSON();
expect(tree).toMatchSnapshot();
});
it("Button bg white with icon snapshot", () => {
const tree = renderer
.create(
<Button name={"test"} type={"circle"} bg={"white"}>
<Icon.Exchange />
</Button>,
)
.toJSON();
expect(tree).toMatchSnapshot();
});
it("Button bg white disabled snapshot", () => {
const tree = renderer
.create(
<Button name={"test"} type={"circle"} bg={"white"}>
Hello
</Button>,
)
.toJSON();
expect(tree).toMatchSnapshot();
});
});
|
package patterns.factory.simple;
import org.junit.Test;
import org.junit.Assert;
public class SimpleClientTest {
@Test
public void testProduct1Creation() throws Exception {
Product product1 = Product.createProduct1();
Assert.assertEquals(Product1.NAME, product1.getName());
}
@Test
public void testProduct2Creation() throws Exception {
Product product2 = Product.createProduct2();
Assert.assertEquals(Product2.NAME, product2.getName());
}
}
|
import { Component } from '@angular/core';
import { Store } from '@ngrx/store';
import { Observable } from 'rxjs';
import * as EmployeesSelectors from '../employees.selectors';
@Component({
selector: 'current-employees',
templateUrl: './current-employees.component.html'
})
export class CurrentEmployeesComponent {
employeeList: Observable<string[]>;
constructor(store: Store) {
this.employeeList = store.select(
EmployeesSelectors.getCurrentEmployees
);
}
}
|
<gh_stars>0
import { BottomTabScreenProps } from "@react-navigation/bottom-tabs";
import {
CompositeScreenProps,
NavigatorScreenParams,
} from "@react-navigation/native";
import { NativeStackScreenProps } from "@react-navigation/native-stack";
declare global {
namespace ReactNavigation {
interface RootParamList extends RootStackParamList {}
}
}
export type RootStackParamList = {
Root: NavigatorScreenParams<RootTabParamList> | undefined;
AddNewTrip: undefined;
};
export type RootStackScreenProps<Screen extends keyof RootStackParamList> =
NativeStackScreenProps<RootStackParamList, Screen>;
export type RootTabParamList = {
Trips: undefined;
Maps: undefined;
AddTrip: undefined;
Stats: undefined;
Profile: undefined;
};
export type RootTabScreenProps<Screen extends keyof RootTabParamList> =
CompositeScreenProps<
BottomTabScreenProps<RootTabParamList, Screen>,
NativeStackScreenProps<RootStackParamList>
>;
|
#!/bin/bash
VERSION="0.4.1"
DATE="2020-12-06"
NAME="devilbox-cli"
DESCRIPTION="A simple and conveniant command line to manage devilbox from anywhere"
LINK="https://github.com/louisgab/devilbox-cli"
ENV_FILE=".env"
PHP_CONFIG="PHP_SERVER="
APACHE_CONFIG="HTTPD_SERVER=apache-"
MYSQL_CONFIG="MYSQL_SERVER=mysql-"
DOCROOT_CONFIG="HTTPD_DOCROOT_DIR="
WWWPATH_CONFIG="HOST_PATH_HTTPD_DATADIR="
## Basic wrappers around exit codes
OK_CODE=0
KO_CODE=1
was_success() {
local exit_code=$?
[ "$exit_code" -eq "$OK_CODE" ]
}
was_error() {
local exit_code=$?
[ "$exit_code" -eq "$KO_CODE" ]
}
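# usage: test the status of the immediately preceding command, e.g.
#   grep -q "pattern" file; if was_success; then echo "found"; fi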
die () {
local exit_code=$1
if [ ! -z "$exit_code" ]; then
exit "$exit_code"
else
exit "$?"
fi
}
## Functions used for fancy output
COLOR_DEFAULT=$(tput sgr0)
COLOR_RED=$(tput setaf 1)
COLOR_GREEN=$(tput setaf 2)
COLOR_YELLOW=$(tput setaf 3)
COLOR_BLUE=$(tput setaf 4)
# COLOR_PURPLE=$(tput setaf 5)
# COLOR_CYAN=$(tput setaf 6)
COLOR_LIGHT_GRAY=$(tput setaf 7)
COLOR_DARK_GRAY=$(tput setaf 0)
error() {
local message=$1
printf "%s %s\n" "${COLOR_RED}[✘]" "${COLOR_DEFAULT}$message" >&2
die "$KO_CODE"
}
success() {
local message=$1
printf "%s %s\n" "${COLOR_GREEN}[✔]" "${COLOR_DEFAULT}$message"
}
info() {
local message=$1
printf "%s %s\n" "${COLOR_YELLOW}[!]" "${COLOR_DEFAULT}$message"
}
question() {
local message=$1
printf "%s %s\n" "${COLOR_BLUE}[?]" "${COLOR_DEFAULT}$message"
}
## Functions used for user interaction
has_confirmed() {
local response=$1
case "$response" in
[yY][eE][sS]|[yY]) return "$OK_CODE";;
*) return "$KO_CODE";;
esac
}
ask() {
local question=$1
local response
read -r -p "$(question "${question} [y/N] ")" response
printf '%s' "$response"
return "$OK_CODE"
}
confirm() {
local question=$1
if has_confirmed "$(ask "$question")"; then
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
## Functions used to manipulate choices values in .env file
is_choice_existing () {
local config=$1
local choice=$2
local search
search=$(grep -Eo "^#*$config$choice" "$ENV_FILE")
if was_success && [ ! -z "$search" ] ;then
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
get_current_choice () {
local config=$1
local current
current=$(grep -Eo "^$config+[.[:digit:]]*" "$ENV_FILE" | sed "s/.*$config//g")
if was_success && [ ! -z "$current" ] ;then
printf "%s" "$current"
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
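# e.g. get_current_choice "$PHP_CONFIG" prints "7.2" when .env has an active
# (uncommented) "PHP_SERVER=7.2" line (version number illustrative)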
is_choice_available() {
local config=$1
local choice=$2
local current
current=$(get_current_choice "$config")
if was_success && [ "$choice" != "$current" ] ;then
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
get_all_choices () {
local config=$1
local all
all=$(grep -Eo "^#*$config+[.[:digit:]]*" "$ENV_FILE" | sed "s/.*$config//g")
if was_success && [ ! -z "$all" ] ;then
printf "%s\n" "$all"
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
set_choice () {
local config=$1
local new=$2
local current
if ! is_choice_existing "$config" "$new" || ! is_choice_available "$config" "$new"; then
return "$KO_CODE"
fi
current=$(get_current_choice "$config")
if was_error; then
return "$KO_CODE"
fi
sed -i -e "s/\(^#*$config$current\).*/#$config$current/" "$ENV_FILE"
if was_error; then
return "$KO_CODE"
fi
sed -i -e "s/\(^#*$config$new\).*/$config$new/" "$ENV_FILE"
if was_error; then
return "$KO_CODE"
fi
current=$(get_current_choice "$config")
if was_success && [[ "$current" = "$new" ]]; then
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
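# e.g. set_choice "$PHP_CONFIG" "7.4" re-comments the active "PHP_SERVER=x.x"
# line and uncomments "#PHP_SERVER=7.4" in .env (version numbers illustrative)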
### READABLE VERSIONS
is_readable_choice_existing () {
local type=$1
local config=$2
local choice=$3
if is_choice_existing "$config" "$choice"; then
success "$type version $choice is existing"
return "$OK_CODE"
else
error "$type version $choice does not exists"
return "$K0_CODE"
fi
}
get_readable_current_choice () {
local type=$1
local config=$2
local current
current=$(get_current_choice "$config")
if was_success; then
info "$type current version is $current"
return "$OK_CODE"
else
error "Couldnt retrieve current version of $type"
return "$KO_CODE"
fi
}
is_readable_choice_available() {
local config=$1
local choice=$2
# note: $type is inherited from the caller's scope (bash locals are dynamically scoped)
if is_choice_available "$config" "$choice"; then
success "$type version $choice is available"
return "$OK_CODE"
else
error "$type is already using version $choice"
return "$K0_CODE"
fi
}
get_readable_all_choices () {
local type=$1
local config=$2
local all
all=$(get_all_choices "$config")
if was_success; then
info "$type available versions:"
printf "%s\n" "$all"
return "$OK_CODE"
else
error "Couldnt retrive available versions of $type"
return "$KO_CODE"
fi
}
set_readable_choice () {
local type=$1
local config=$2
local new=$3
if ! is_readable_choice_existing "$type" "$config" "$new"; then
return "$KO_CODE"
fi
if ! is_readable_choice_available "$config" "$new"; then
return "$KO_CODE"
fi
if set_choice "$config" "$new"; then
success "$type version updated to $new"
return "$OK_CODE"
else
error "$type version change failed"
return "$KO_CODE"
fi
}
## Functions used to manipulate a config value in .env file
get_config () {
local config=$1
local current
current=$(grep -Eo "^$config+[[:alnum:][:punct:]]*" "$ENV_FILE" | sed "s/.*$config//g")
if was_success && [ ! -z "$current" ] ;then
printf "%s" "$current"
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
set_config () {
local config=$1
local new=$2
local current
current="$(get_config "$config")"
if was_error; then
return "$KO_CODE"
fi
sed -i -e "s/\(^#*$config${current//\//\\\/}\).*/$config${new//\//\\\/}/" "$ENV_FILE"
if was_error; then
return "$KO_CODE"
fi
current="$(get_config "$config")"
if was_success && [[ "$current" = "$new" ]]; then
return "$OK_CODE"
else
return "$KO_CODE"
fi
}
### READABLE VERSIONS
get_readable_current_config () {
local type=$1
local config=$2
local current
current=$(get_config "$config")
if was_success; then
info "$type current config is $current"
return "$OK_CODE"
else
error "Couldnt retrieve current config of $type"
return "$KO_CODE"
fi
}
set_readable_config () {
local type=$1
local config=$2
local new=$3
if set_config "$config" "$new"; then
success "$type config updated to $new"
return "$OK_CODE"
else
error "$type config change failed"
return "$KO_CODE"
fi
}
is_running () {
local all
all=$(docker-compose ps 2> /dev/null | grep "devilbox" | awk '{print $3}' | grep "Up")
if was_success; then
return "$OK_CODE";
else
return "$KO_CODE";
fi
}
get_current_apache_version () {
get_readable_current_choice "Apache" "$APACHE_CONFIG"
}
get_all_apache_versions () {
get_readable_all_choices "Apache" "$APACHE_CONFIG"
}
set_apache_version () {
local new=$1
set_readable_choice "Apache" "$APACHE_CONFIG" "$new"
}
get_current_php_version () {
get_readable_current_choice "PHP" "$PHP_CONFIG"
}
get_all_php_versions () {
get_readable_all_choices "PHP" "$PHP_CONFIG"
}
set_php_version () {
local new=$1
set_readable_choice "PHP" "$PHP_CONFIG" "$new"
}
get_current_mysql_version () {
get_readable_current_choice "MySql" "$MYSQL_CONFIG"
}
get_all_mysql_versions () {
get_readable_all_choices "MySql" "$MYSQL_CONFIG"
}
set_mysql_version () {
local new=$1
set_readable_choice "MySql" "$MYSQL_CONFIG" "$new"
}
get_current_document_root () {
get_readable_current_config "Document root" "$DOCROOT_CONFIG"
}
set_document_root () {
local new=$1
set_readable_config "Document root" "$DOCROOT_CONFIG" "$new"
}
get_current_projects_path () {
get_readable_current_config "Projects path" "$WWWPATH_CONFIG"
}
set_projects_path () {
local new=$1
set_readable_config "Projects path" "$WWWPATH_CONFIG" "$new"
}
check_command () {
./check-config.sh
}
config_command () {
for arg in "$@"; do
case $arg in
-a=\*|--apache=\*) get_all_apache_versions; shift;;
-a=*|--apache=*) set_apache_version "${arg#*=}"; shift;;
-a|--apache) get_current_apache_version; shift;;
-p=\*|--php=\*) get_all_php_versions; shift;;
-p=*|--php=*) set_php_version "${arg#*=}"; shift;;
-p|--php) get_current_php_version; shift;;
-m=\*|--mysql=\*) get_all_mysql_versions; shift;;
-m=*|--mysql=*) set_mysql_version "${arg#*=}"; shift;;
-m|--mysql) get_current_mysql_version; shift;;
-r=*|--root=*) set_document_root "${arg#*=}"; shift;;
-r|--root) get_current_document_root; shift;;
-w=*|--www=*) set_projects_path "${arg#*=}"; shift;;
-w|--www) get_current_projects_path; shift;;
esac
done
}
enter_command () {
if ! is_running; then
error "Devilbox containers are not running"
return "$KO_CODE"
fi
./shell.sh
}
exec_command() {
if ! is_running; then
error "Devilbox containers are not running"
return "$KO_CODE"
fi
docker-compose exec -u devilbox php bash -c "$@"
}
add_usage_command () {
local command=$1
local description=$2
printf '%-30s\t %s\n' "$command" "${COLOR_DARK_GRAY}$description${COLOR_DEFAULT}"
}
add_usage_arg () {
local arg=$1
local description=$2
printf '%-30s\t %s\n' " ${COLOR_LIGHT_GRAY}$arg" "${COLOR_DARK_GRAY}$description${COLOR_DEFAULT}"
}
help_command () {
printf "\n"
printf "%s\n" "Usage: $0 <command> [--args]... "
printf "\n"
add_usage_command "check" "Check your .env file for potential errors"
add_usage_command "c,config" "Show / Edit the current config"
add_usage_arg "-a=<x.x>,--apache=<x.x>" "Set a specific apache version"
add_usage_arg "-a=*,--apache=*" "Get all available apache versions"
add_usage_arg "-p=*,--php=*" "Get all available php versions"
add_usage_arg "-m=*,--mysql=*" "Get all available mysql versions"
add_usage_arg "-p,--php" "Get current php version"
add_usage_arg "-a,--apache" "Get current apache version"
add_usage_arg "-m,--mysql" "Get current mysql version"
add_usage_arg "-r=<path>,--root=<path>" "Set the document root"
add_usage_arg "-r,--root" "Get the current document root"
add_usage_arg "-w=<path>,--www=<path>" "Set the path to projects"
add_usage_arg "-w,--www" "Get the current path to projects"
add_usage_arg "-d=<path>,--database=<path>" "Set the path to databases"
add_usage_arg "-d,--database" "Get the current path to databases"
add_usage_arg "-p=<x.x>,--php=<x.x>" "Set a specific php version"
add_usage_arg "-m=<x.x>,--mysql=<x.x>" "Set a specific mysql version"
add_usage_command "e,enter" "Enter the devilbox shell"
add_usage_command "x, exec '<command>'" "Execute a command inside the container without entering it"
add_usage_command "h, help" "List all available commands"
add_usage_command "mysql ['<query>']" "Launch a preconnected mysql shell, with optional query"
add_usage_command "o,open" "Open the devilbox intranet"
add_usage_arg "-h,--http" "Use non-https url"
add_usage_command "restart" "Restart the devilbox docker containers"
add_usage_arg "-s,--silent" "Hide errors and run in background"
add_usage_command "r,run" "Run the devilbox docker containers"
add_usage_arg "-s,--silent" "Hide errors and run in background"
add_usage_command "s,stop" "Stop devilbox and docker containers"
add_usage_command "u,update" "Update devilbox and docker containers"
add_usage_command "v, version" "Show version information"
printf "\n"
}
mysql_command() {
if ! is_running; then
error "Devilbox containers are not running"
return "$KO_CODE"
fi
if [ -z "$1" ]; then
exec_command 'mysql -hmysql -uroot'
else
exec_command "mysql -hmysql -uroot -e '$1'"
fi
}
open_http_intranet () {
xdg-open "http://localhost/" 2> /dev/null >/dev/null
}
open_https_intranet () {
xdg-open "https://localhost/" 2> /dev/null >/dev/null
}
open_command () {
if ! is_running; then
error "Devilbox containers are not running"
return "$KO_CODE"
fi
if [[ $# -eq 0 ]] ; then
open_https_intranet
else
for arg in "$@"; do
case $arg in
-h|--http) open_http_intranet; shift;;
esac
done
fi
}
restart_command() {
stop_command
run_command "$@"
}
get_default_containers() {
if [ -n "$DEVILBOX_CONTAINERS" ]; then
printf %s "${DEVILBOX_CONTAINERS}"
else
printf %s "httpd php mysql"
fi
}
run_containers () {
docker-compose up $(get_default_containers)
}
run_containers_silent () {
docker-compose up -d $(get_default_containers)
}
run_command () {
if is_running; then
error "Devilbox containers are already running"
return "$KO_CODE"
fi
if [[ $# -eq 0 ]] ; then
run_containers
else
for arg in "$@"; do
case $arg in
-s|--silent) run_containers_silent; shift;;
esac
done
fi
}
stop_command () {
if ! is_running; then
error "Devilbox containers are not running"
return "$KO_CODE"
fi
docker-compose stop
docker-compose rm -f
}
get_recent_devilbox_versions () {
local versions
versions=$(git fetch --tags && git describe --abbrev=0 --tags $(git rev-list --tags --max-count=10))
if was_success; then
info "Devilbox available versions:"
printf "%s\n" "$versions"
return "$OK_CODE"
else
error "Couldnt retrive available versions of devilbox"
return "$KO_CODE"
fi
}
latest_version () {
local latest
latest=$(git fetch --tags && git describe --abbrev=0 --tags $(git rev-list --tags --max-count=1))
if was_success; then
# print the raw tag only, so callers can capture it via "$(latest_version)"
printf "%s" "$latest"
return "$OK_CODE"
else
error "Couldn't retrieve latest version of devilbox"
return "$KO_CODE"
fi
}
set_devilbox_version () {
local version=$1
confirm "Did you backup your databases before?"
if was_success ;then
git fetch --tags && git checkout "$version"
if was_success; then
success "Devilbox updated to $version, please restart"
return "$OK_CODE"
else
error "Couldnt update devilbox"
return "$KO_CODE"
fi
fi
}
update_command () {
if is_running; then
error "Devilbox containers are running, please use devilbox stop"
return "$KO_CODE"
fi
for arg in "$@"; do
case $arg in
-v=\*|--version=\*) get_recent_devilbox_versions; shift;;
# the literal "latest" case must precede the generic -v=* pattern, or it would never match
-v=latest|--version=latest) set_devilbox_version "$(latest_version)"; shift;;
-v=*|--version=*) set_devilbox_version "${arg#*=}"; shift;;
-d|--docker) sh update-docker.sh; shift;;
esac
done
}
version_command() {
printf "\n"
printf "%s\n" "$NAME v$VERSION ($DATE)"
printf "%s\n" "${COLOR_LIGHT_GRAY}$DESCRIPTION${COLOR_DEFAULT}"
printf "%s\n" "${COLOR_LIGHT_GRAY}$LINK${COLOR_DEFAULT}"
printf "\n"
}
safe_cd() {
local path=$1
local error_msg=$2
if [[ ! -d "$path" ]]; then
error "$error_msg"
fi
cd "$path" >/dev/null || error "$error_msg"
}
get_devilbox_path() {
if [ -n "$DEVILBOX_PATH" ]; then
printf %s "${DEVILBOX_PATH}"
else
printf %s "$HOME/.devilbox"
fi
}
main () {
safe_cd "$(get_devilbox_path)" "Devilbox not found, please make sure it is installed in your home directory or use DEVILBOX_PATH in your profile."
if [[ $# -eq 0 ]] ; then
version_command
help_command
else
case $1 in
check) shift; check_command;;
c|config) shift; config_command "$@";;
e|enter) shift; enter_command;;
x|exec) shift; exec_command "$@";;
h|help|-h|--help) shift; help_command;;
mysql) shift; mysql_command "$@";;
o|open) shift; open_command "$@";;
restart) shift; restart_command "$@";;
r|run) shift; run_command "$@";;
s|stop) shift; stop_command;;
u|update) shift; update_command "$@";;
v|version|-v|--version) shift; version_command;;
*) error "Unknown command $arg, see -h for help.";;
esac
fi
}
main "$@"
|
#!/bin/bash
dieharder -d 11 -g 56 -S 1693474437
|
package acousticfield3d.gui;
import acousticfield3d.math.M;
import acousticfield3d.math.Vector2f;
import acousticfield3d.math.Vector3f;
import acousticfield3d.scene.Entity;
import acousticfield3d.scene.Scene;
import acousticfield3d.simulation.Transducer;
import acousticfield3d.utils.FileUtils;
import acousticfield3d.utils.Parse;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
*
* @author Asier
*/
public class HoloPatternsForm extends javax.swing.JFrame {
private final MainForm form;
private final ArrayList<Float> mPhases;
enum BasicOperation{
opSet, opAdd, opSus
};
public HoloPatternsForm(MainForm form) {
this.form = form;
mPhases = new ArrayList<>();
initComponents();
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
patternsGroup = new javax.swing.ButtonGroup();
jButton1 = new javax.swing.JButton();
phaseNormalizationGroup = new javax.swing.ButtonGroup();
jLabel2 = new javax.swing.JLabel();
frText = new javax.swing.JTextField();
helicalCheck = new javax.swing.JRadioButton();
gridCheck = new javax.swing.JRadioButton();
setButton = new javax.swing.JButton();
addButton = new javax.swing.JButton();
sustractButton = new javax.swing.JButton();
mButton = new javax.swing.JButton();
mSetButton = new javax.swing.JButton();
mAddButton = new javax.swing.JButton();
mSustractButton = new javax.swing.JButton();
gaussianAmpButton = new javax.swing.JButton();
jLabel1 = new javax.swing.JLabel();
varText = new javax.swing.JTextField();
savePhasesButton = new javax.swing.JButton();
loadPhasesButton = new javax.swing.JButton();
onlySelectionCheck = new javax.swing.JCheckBox();
uniformPhaseButton = new javax.swing.JButton();
moduleText = new javax.swing.JTextField();
jLabel3 = new javax.swing.JLabel();
radialCheck = new javax.swing.JRadioButton();
jLabel4 = new javax.swing.JLabel();
offsetText = new javax.swing.JTextField();
halfSplitCheck = new javax.swing.JCheckBox();
normalizePhaseButton = new javax.swing.JButton();
phaseNorFirstCheck = new javax.swing.JRadioButton();
phaseNorMinCheck = new javax.swing.JRadioButton();
phaseNorValCheck = new javax.swing.JRadioButton();
phaseNorText = new javax.swing.JTextField();
phaseNorNoneCheck = new javax.swing.JRadioButton();
jButton1.setText("jButton1");
setDefaultCloseOperation(javax.swing.WindowConstants.DO_NOTHING_ON_CLOSE);
setTitle("HoloPatterns");
addWindowListener(new java.awt.event.WindowAdapter() {
public void windowClosing(java.awt.event.WindowEvent evt) {
formWindowClosing(evt);
}
});
jLabel2.setText("fr:");
frText.setText("1");
patternsGroup.add(helicalCheck);
helicalCheck.setSelected(true);
helicalCheck.setText("helical");
patternsGroup.add(gridCheck);
gridCheck.setText("HGrid");
setButton.setText("Set");
setButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
setButtonActionPerformed(evt);
}
});
addButton.setText("Add");
addButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
addButtonActionPerformed(evt);
}
});
sustractButton.setText("Sustract");
sustractButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
sustractButtonActionPerformed(evt);
}
});
mButton.setText("M");
mButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
mButtonActionPerformed(evt);
}
});
mSetButton.setText("Set");
mSetButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
mSetButtonActionPerformed(evt);
}
});
mAddButton.setText("Add");
mAddButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
mAddButtonActionPerformed(evt);
}
});
mSustractButton.setText("Sustract");
mSustractButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
mSustractButtonActionPerformed(evt);
}
});
gaussianAmpButton.setText("Gaussian Amp");
gaussianAmpButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
gaussianAmpButtonActionPerformed(evt);
}
});
jLabel1.setText("var");
varText.setText("0.2");
savePhasesButton.setText("save");
savePhasesButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
savePhasesButtonActionPerformed(evt);
}
});
loadPhasesButton.setText("load");
loadPhasesButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
loadPhasesButtonActionPerformed(evt);
}
});
onlySelectionCheck.setText("only selected");
uniformPhaseButton.setText("UniformPhase");
uniformPhaseButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
uniformPhaseButtonActionPerformed(evt);
}
});
moduleText.setText("1");
jLabel3.setText("m:");
patternsGroup.add(radialCheck);
radialCheck.setText("radial");
jLabel4.setText("offset:");
offsetText.setText("0");
halfSplitCheck.setText("HalfSplit");
normalizePhaseButton.setText("NormalizePhase");
normalizePhaseButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
normalizePhaseButtonActionPerformed(evt);
}
});
phaseNormalizationGroup.add(phaseNorFirstCheck);
phaseNorFirstCheck.setSelected(true);
phaseNorFirstCheck.setText("first");
phaseNormalizationGroup.add(phaseNorMinCheck);
phaseNorMinCheck.setText("min");
phaseNormalizationGroup.add(phaseNorValCheck);
phaseNorValCheck.setText("val");
phaseNorText.setText("0.3");
phaseNormalizationGroup.add(phaseNorNoneCheck);
phaseNorNoneCheck.setText("none");
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(gaussianAmpButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jLabel1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(varText))
.addGroup(layout.createSequentialGroup()
.addComponent(savePhasesButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(loadPhasesButton))
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel2)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(frText, javax.swing.GroupLayout.PREFERRED_SIZE, 105, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jLabel3)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(moduleText))
.addGroup(layout.createSequentialGroup()
.addComponent(setButton, javax.swing.GroupLayout.PREFERRED_SIZE, 72, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(addButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(sustractButton))
.addGroup(layout.createSequentialGroup()
.addComponent(mButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(mSetButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(mAddButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(mSustractButton))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(phaseNorNoneCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(phaseNorFirstCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(phaseNorMinCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(phaseNorValCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(phaseNorText, javax.swing.GroupLayout.PREFERRED_SIZE, 56, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(onlySelectionCheck)
.addGroup(layout.createSequentialGroup()
.addComponent(uniformPhaseButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(normalizePhaseButton))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel4)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(offsetText, javax.swing.GroupLayout.PREFERRED_SIZE, 120, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
.addComponent(helicalCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(gridCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(radialCheck)))
.addGap(10, 10, 10)
.addComponent(halfSplitCheck)))
.addGap(0, 0, Short.MAX_VALUE)))
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel2)
.addComponent(frText, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel3)
.addComponent(moduleText, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel4)
.addComponent(offsetText, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(halfSplitCheck))
.addGap(5, 5, 5)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(helicalCheck)
.addComponent(gridCheck)
.addComponent(radialCheck))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(onlySelectionCheck)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(setButton)
.addComponent(addButton)
.addComponent(sustractButton))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(mButton)
.addComponent(mSetButton)
.addComponent(mAddButton)
.addComponent(mSustractButton))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(savePhasesButton)
.addComponent(loadPhasesButton))
.addGap(18, 18, Short.MAX_VALUE)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(uniformPhaseButton)
.addComponent(normalizePhaseButton))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(phaseNorFirstCheck)
.addComponent(phaseNorMinCheck)
.addComponent(phaseNorValCheck)
.addComponent(phaseNorText, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(phaseNorNoneCheck))
.addGap(11, 11, 11)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(gaussianAmpButton)
.addComponent(jLabel1)
.addComponent(varText, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addContainerGap())
);
pack();
}// </editor-fold>//GEN-END:initComponents
public void setHoloMemory(float[] phases){
if (phases != null){
mPhases.clear();
for(float phase : phases){
mPhases.add( phase );
}
}
}
public float[] getHoloMemory(){
final int size = mPhases.size();
final float[] phases = new float[ size ];
for(int i = 0; i < size; ++i){
phases[i] = mPhases.get(i);
}
return phases;
}
private void applyOpToArray(BasicOperation o){
Vector3f min = new Vector3f(), max = new Vector3f();
ArrayList<Transducer> ts;
if (onlySelectionCheck.isSelected()){
ts = new ArrayList<>();
for(Entity e : form.getSelection()){
if (e instanceof Transducer){
ts.add( (Transducer) e);
}
}
}else{
ts = form.simulation.getTransducers();
}
Scene.calcBoundaries(ts, min, max);
final Vector3f size = max.subtract( min );
final Vector3f center = max.add(min).divideLocal( 2 );
final float fr = Parse.toFloat( frText.getText() );
final float m = Parse.toFloat( moduleText.getText() );
final float offset = Parse.toFloat( offsetText.getText() );
final boolean halfSplit = halfSplitCheck.isSelected();
for(Transducer t : ts){
final Vector3f pos = t.getTransform().getTranslation();
final Vector3f npos3 = pos.subtract( center ).divideLocal( size );
final Vector2f p = new Vector2f( npos3.x, npos3.z);
float value = 0;
if (helicalCheck.isSelected()){
final int mp = (int) (p.length() * m);
final float revOffset = mp % 2 == 0 ? 0 : M.PI;
value = ((p.getAngle() - revOffset + offset) * fr / M.PI) % 2.0f;
if (halfSplit){
if (value >= 0.0f && value <= 1.0f) { value = 0.0f;}
else { value = 1.0f; }
}
}else if(gridCheck.isSelected()){
value = 2.0f * M.sin(M.TWO_PI * p.x * fr);
}else if(radialCheck.isSelected()){
value = (p.length() * m) % m;
}
applyToTransducer(t, value, o);
}
}
private void setButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_setButtonActionPerformed
applyOpToArray(BasicOperation.opSet);
form.needUpdate();
}//GEN-LAST:event_setButtonActionPerformed
private void addButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_addButtonActionPerformed
applyOpToArray(BasicOperation.opAdd);
form.needUpdate();
}//GEN-LAST:event_addButtonActionPerformed
public void addMemorizedHoloPattern(){
applyMemory(BasicOperation.opAdd);
form.needUpdate();
}
public void addCurrentPattern(){
applyOpToArray(BasicOperation.opAdd);
form.needUpdate();
}
private void sustractButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_sustractButtonActionPerformed
applyOpToArray(BasicOperation.opSus);
form.needUpdate();
}//GEN-LAST:event_sustractButtonActionPerformed
public void memorizePattern(){
mPhases.clear();
final ArrayList<Transducer> ts = form.simulation.getTransducers();
for(Transducer t : ts){
mPhases.add( t.getPhase() );
}
}
private void mButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_mButtonActionPerformed
memorizePattern();
}//GEN-LAST:event_mButtonActionPerformed
private void applyToTransducer(Transducer t, float phase, BasicOperation o){
if(o == BasicOperation.opSet){
t.setPhase( phase );
}else if(o == BasicOperation.opAdd){
t.setPhase( t.getPhase() + phase );
}else if(o == BasicOperation.opSus){
t.setPhase( t.getPhase() - phase );
}
}
private void applyMemory(BasicOperation o){
final ArrayList<Transducer> ts = form.simulation.getTransducers();
final int n = mPhases.size();
int index = 0;
for(Transducer t : ts){
if (index < n){
applyToTransducer(t, mPhases.get(index), o);
index++;
}else{
break;
}
}
}
private void mSetButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_mSetButtonActionPerformed
applyMemory(BasicOperation.opSet);
form.needUpdate();
}//GEN-LAST:event_mSetButtonActionPerformed
private void mAddButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_mAddButtonActionPerformed
applyMemory(BasicOperation.opAdd);
form.needUpdate();
}//GEN-LAST:event_mAddButtonActionPerformed
private void mSustractButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_mSustractButtonActionPerformed
applyMemory(BasicOperation.opSus);
form.needUpdate();
}//GEN-LAST:event_mSustractButtonActionPerformed
public void subtractFromHoloMemory(){
applyMemory(BasicOperation.opSus);
form.needUpdate();
}
private void gaussianAmpButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_gaussianAmpButtonActionPerformed
Vector3f min = new Vector3f(), max = new Vector3f();
final ArrayList<Transducer> ts = form.simulation.getTransducers();
Scene.calcBoundaries(ts, min, max);
final Vector3f size = max.subtract( min );
final Vector3f center = max.add(min).divideLocal( 2 );
final float var = Parse.toFloat( varText.getText() );
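// Normalized Gaussian amplitude profile: A(d) = exp(-(d - b)^2 / (2*c^2)) / sqrt(2*PI*var),
// with mean b = 0, c = sqrt(var), and var read from the "var" text field; d is each
// transducer's distance from the array center in normalized coordinates.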
for(Transducer t : ts){
final Vector3f pos = t.getTransform().getTranslation();
final Vector3f npos3 = pos.subtract( center ).divideLocal( size );
final Vector2f p = new Vector2f( npos3.x, npos3.z);
float a = 1.0f / M.sqrt(M.TWO_PI * var);
float b = 0.0f;
float c = M.sqrt(var);
float dist = p.length();
float gaussian = a * M.exp(- (M.sqr(dist-b) / (2.0f*c*c)));
t.setpAmplitude(gaussian);
}
form.needUpdate();
}//GEN-LAST:event_gaussianAmpButtonActionPerformed
public void uniformPhase(){
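// Phases appear to be expressed in units of PI (pattern values elsewhere wrap
// modulo 2.0f), so shifting negative phases up by 2.0f maps them into [0, 2).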
final ArrayList<Transducer> ts = form.simulation.getTransducers();
for(Transducer t : ts){
float phase = t.getPhase();
while(phase < 0.0f){
phase += 2.0f;
}
t.setPhase(phase);
}
}
public void normalizePhase(){
final ArrayList<Transducer> ts = form.simulation.getTransducers();
float phaseToSubtract = 0.0f;
if (phaseNorFirstCheck.isSelected()){
phaseToSubtract = ts.get(0).getPhase();
}else if(phaseNorMinCheck.isSelected()){
phaseToSubtract = Float.MAX_VALUE;
for(Transducer t : ts){
phaseToSubtract = M.min(t.getPhase(), phaseToSubtract);
}
}else if(phaseNorValCheck.isSelected()){
phaseToSubtract = Parse.toFloat( phaseNorText.getText() );
}else if(phaseNorNoneCheck.isSelected()){
return;
}
for(Transducer t : ts){
t.setPhase( t.getPhase() - phaseToSubtract );
}
}
private void uniformPhaseButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_uniformPhaseButtonActionPerformed
uniformPhase();
form.needUpdate();
}//GEN-LAST:event_uniformPhaseButtonActionPerformed
private void savePhasesButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_savePhasesButtonActionPerformed
String path = FileUtils.selectNonExistingFile(this, ".txt");
if(path != null){
try {
FileUtils.writeBytesInFile(new File(path), Parse.printFloats(mPhases).getBytes() );
} catch (IOException ex) {
Logger.getLogger(HoloPatternsForm.class.getName()).log(Level.SEVERE, null, ex);
}
}
}//GEN-LAST:event_savePhasesButtonActionPerformed
private void loadPhasesButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_loadPhasesButtonActionPerformed
String path = FileUtils.selectFile(this, "open", ".txt", null);
if(path != null){
try {
String content = new String( FileUtils.getBytesFromFile(new File(path)) );
mPhases.clear();
mPhases.addAll( Parse.parseFloats(content) );
} catch (IOException ex) {
Logger.getLogger(HoloPatternsForm.class.getName()).log(Level.SEVERE, null, ex);
}
}
}//GEN-LAST:event_loadPhasesButtonActionPerformed
private void normalizePhaseButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_normalizePhaseButtonActionPerformed
normalizePhase();
form.needUpdate();
}//GEN-LAST:event_normalizePhaseButtonActionPerformed
private void formWindowClosing(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosing
setVisible( false );
}//GEN-LAST:event_formWindowClosing
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton addButton;
private javax.swing.JTextField frText;
private javax.swing.JButton gaussianAmpButton;
private javax.swing.JRadioButton gridCheck;
private javax.swing.JCheckBox halfSplitCheck;
private javax.swing.JRadioButton helicalCheck;
private javax.swing.JButton jButton1;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JButton loadPhasesButton;
private javax.swing.JButton mAddButton;
private javax.swing.JButton mButton;
private javax.swing.JButton mSetButton;
private javax.swing.JButton mSustractButton;
private javax.swing.JTextField moduleText;
private javax.swing.JButton normalizePhaseButton;
private javax.swing.JTextField offsetText;
private javax.swing.JCheckBox onlySelectionCheck;
private javax.swing.ButtonGroup patternsGroup;
private javax.swing.JRadioButton phaseNorFirstCheck;
private javax.swing.JRadioButton phaseNorMinCheck;
private javax.swing.JRadioButton phaseNorNoneCheck;
private javax.swing.JTextField phaseNorText;
private javax.swing.JRadioButton phaseNorValCheck;
private javax.swing.ButtonGroup phaseNormalizationGroup;
private javax.swing.JRadioButton radialCheck;
private javax.swing.JButton savePhasesButton;
private javax.swing.JButton setButton;
private javax.swing.JButton sustractButton;
private javax.swing.JButton uniformPhaseButton;
private javax.swing.JTextField varText;
// End of variables declaration//GEN-END:variables
}
|
// GET GOALS
export const loadGoals = (goals) => {
return {
type: 'LOAD_GOALS',
goals
};
};
// POST GOAL
export const addGoal = (goal) => {
return {
type: 'ADD_GOAL',
goal
};
};
// EDIT GOAL
export const editGoal = (goal) => {
return {
type: 'EDIT_GOAL',
goal
};
};
// DELETE GOAL
export const deleteGoal = (goalId) => {
return {
type: 'DELETE_GOAL',
id: goalId
};
};
export const getGoals = () => {
return (dispatch) => {
fetch('http://localhost:3000/api/v1/goals', {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
})
.then((res) => res.json())
.then((goals) => {
dispatch(loadGoals(goals));
})
.catch(console.error);
};
};
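// Usage sketch (assumes a thunk-enabled store; argument values are illustrative):
//   store.dispatch(getGoals());
//   store.dispatch(postGoal('2021-06-01', 10000, fitBitUser));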
export const postGoal = (
goalDate,
goal,
fitBitUser
) => {
return (dispatch) => {
fetch('http://localhost:3000/api/v1/goals', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
goalDate,
goal,
user_id: fitBitUser.user.encodedId
})
})
.then((res) => res.json())
.then((goal) => {
dispatch(addGoal(goal));
})
.catch(console.error);
};
};
export const updateGoal = (goalId, goalDate, goal) => {
return (dispatch) => {
fetch(`http://localhost:3000/api/v1/goals/${goalId}`, {
method: 'PATCH',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
goalDate: goalDate,
goal: goal
})
})
.then((res) => res.json())
.then((goal) => {
dispatch(editGoal(goal));
})
.catch(console.error);
};
};
export const destroyGoal = (goalId) => {
return (dispatch) => {
fetch(`http://localhost:3000/api/v1/goals/${goalId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
id: goalId
})
})
.then((res) => res.json())
.then((data) => {
dispatch(deleteGoal(data.id));
})
.catch(console.error);
};
}; |
export interface IProducts {
product_id: string;
product_img: string;
product_name: string;
product_price: number;
product_details: string;
product_quantity: number;
product_For: string;
}
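// Example object satisfying IProducts (all values are illustrative):
// const sample: IProducts = {
//   product_id: "p-001",
//   product_img: "/img/p-001.png",
//   product_name: "Soap",
//   product_price: 3.5,
//   product_details: "Bar soap",
//   product_quantity: 10,
//   product_For: "laundry",
// };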
// export interface IWashType {
// wash_id: string
// wash_Details: string;
// price: number;
// quantity: number;
// }
|
#!/bin/sh
set -e
PKI=$TASKDDATA"/pki"
if [ ! -d "$PKI" ]; then
mkdir -p "$PKI"
cp /usr/share/taskd/pki/generate* "$PKI"
cp /usr/share/taskd/pki/vars "$PKI"
taskd init > /dev/null 2>&1
fi
# Generate a self-signed certificate if none exists.
# Also generate a user certificate if the USER/USER_ORG env vars are set.
if [ ! -f "$PKI/ca.cert.pem" ]; then
cd "$PKI"
[ -n "$CERT_CN" ] && sed -i "s/\(CN=\).*/\1'$CERT_CN'/" vars
[ -n "$CERT_ORGANIZATION" ] && sed -i "s/\(ORGANIZATION=\).*/\1'$CERT_ORGANIZATION'/" vars
[ -n "$CERT_COUNTRY" ] && sed -i "s/\(COUNTRY=\).*/\1'$CERT_COUNTRY'/" vars
[ -n "$CERT_STATE" ] && sed -i "s/\(STATE=\).*/\1'$CERT_STATE'/" vars
[ -n "$CERT_LOCALITY" ] && sed -i "s/\(LOCALITY=\).*/\1'$CERT_LOCALITY'/" vars
./generate > /dev/null 2>&1
taskd config --force client.cert "$PKI/client.cert.pem"
taskd config --force client.key "$PKI/client.key.pem"
taskd config --force server.cert "$PKI/server.cert.pem"
taskd config --force server.key "$PKI/server.key.pem"
taskd config --force server.crl "$PKI/server.crl.pem"
taskd config --force ca.cert "$PKI/ca.cert.pem"
#taskd config --force log "$TASKDDATA/taskd.log"
taskd config --force pid.file "$TASKDDATA/taskd.pid"
taskd config --force server 0.0.0.0:53589
[ -n "$USER_ORG" ] && taskd add org "$USER_ORG"
if [ -n "$USER" ]; then
taskd add user "$USER_ORG" "$USER"
./generate.client "$USER" > /dev/null 2>&1
printf "\nCertificate for %s/%s \n" "$USER" "$USER_ORG"
cat "$USER.cert.pem"
printf "\nKey for %s/%s \n" "$USER" "$USER_ORG"
cat "$USER.key.pem"
printf "\nCA\n"
cat ca.cert.pem
fi
chown -R taskd:taskd "$TASKDDATA"
else
printf "Certificates already generated, starting taskd"
fi
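# Example invocation (illustrative image and volume names):
#   docker run -e TASKDDATA=/var/taskd -e CERT_CN=taskd.example.org \
#     -e USER_ORG=Public -e USER=alice \
#     -v taskd-data:/var/taskd -p 53589:53589 <image> taskd server --data /var/taskd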
exec "$@"
|
<reponame>aschmidt75/wgmesh
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.13.0
// source: meshservice.proto
package meshservice
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type HandshakeResponse_Result int32
const (
HandshakeResponse_OK HandshakeResponse_Result = 0
HandshakeResponse_ERROR HandshakeResponse_Result = 1
)
// Enum value maps for HandshakeResponse_Result.
var (
HandshakeResponse_Result_name = map[int32]string{
0: "OK",
1: "ERROR",
}
HandshakeResponse_Result_value = map[string]int32{
"OK": 0,
"ERROR": 1,
}
)
func (x HandshakeResponse_Result) Enum() *HandshakeResponse_Result {
p := new(HandshakeResponse_Result)
*p = x
return p
}
func (x HandshakeResponse_Result) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (HandshakeResponse_Result) Descriptor() protoreflect.EnumDescriptor {
return file_meshservice_proto_enumTypes[0].Descriptor()
}
func (HandshakeResponse_Result) Type() protoreflect.EnumType {
return &file_meshservice_proto_enumTypes[0]
}
func (x HandshakeResponse_Result) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use HandshakeResponse_Result.Descriptor instead.
func (HandshakeResponse_Result) EnumDescriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{2, 0}
}
type JoinResponse_Result int32
const (
JoinResponse_OK JoinResponse_Result = 0
JoinResponse_ERROR JoinResponse_Result = 1
)
// Enum value maps for JoinResponse_Result.
var (
JoinResponse_Result_name = map[int32]string{
0: "OK",
1: "ERROR",
}
JoinResponse_Result_value = map[string]int32{
"OK": 0,
"ERROR": 1,
}
)
func (x JoinResponse_Result) Enum() *JoinResponse_Result {
p := new(JoinResponse_Result)
*p = x
return p
}
func (x JoinResponse_Result) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (JoinResponse_Result) Descriptor() protoreflect.EnumDescriptor {
return file_meshservice_proto_enumTypes[1].Descriptor()
}
func (JoinResponse_Result) Type() protoreflect.EnumType {
return &file_meshservice_proto_enumTypes[1]
}
func (x JoinResponse_Result) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use JoinResponse_Result.Descriptor instead.
func (JoinResponse_Result) EnumDescriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{4, 0}
}
type Peer_AnnouncementType int32
const (
Peer_JOIN Peer_AnnouncementType = 0
Peer_LEAVE Peer_AnnouncementType = 1
)
// Enum value maps for Peer_AnnouncementType.
var (
Peer_AnnouncementType_name = map[int32]string{
0: "JOIN",
1: "LEAVE",
}
Peer_AnnouncementType_value = map[string]int32{
"JOIN": 0,
"LEAVE": 1,
}
)
func (x Peer_AnnouncementType) Enum() *Peer_AnnouncementType {
p := new(Peer_AnnouncementType)
*p = x
return p
}
func (x Peer_AnnouncementType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Peer_AnnouncementType) Descriptor() protoreflect.EnumDescriptor {
return file_meshservice_proto_enumTypes[2].Descriptor()
}
func (Peer_AnnouncementType) Type() protoreflect.EnumType {
return &file_meshservice_proto_enumTypes[2]
}
func (x Peer_AnnouncementType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Peer_AnnouncementType.Descriptor instead.
func (Peer_AnnouncementType) EnumDescriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{5, 0}
}
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{0}
}
// HandshakeRequest includes details about which mesh to join
type HandshakeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// name of mesh to join
MeshName string `protobuf:"bytes,1,opt,name=meshName,proto3" json:"meshName,omitempty"`
}
func (x *HandshakeRequest) Reset() {
*x = HandshakeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *HandshakeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HandshakeRequest) ProtoMessage() {}
func (x *HandshakeRequest) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HandshakeRequest.ProtoReflect.Descriptor instead.
func (*HandshakeRequest) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{1}
}
func (x *HandshakeRequest) GetMeshName() string {
if x != nil {
return x.MeshName
}
return ""
}
// HandshakeResponse indicates if joining the desired mesh is
// acceptable and may include authentication/authorization
// requirements which joining nodes have to fulfil.
type HandshakeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Result HandshakeResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=meshservice.HandshakeResponse_Result" json:"result,omitempty"`
ErrorMessage string `protobuf:"bytes,2,opt,name=errorMessage,proto3" json:"errorMessage,omitempty"`
// token which the joining node has to reuse when using the Join/Peers methods
JoinToken string `protobuf:"bytes,3,opt,name=joinToken,proto3" json:"joinToken,omitempty"`
// additional authentication/authorization requirements which joining nodes have to fulfil
// Reserved for future use
AuthReqs map[string]string `protobuf:"bytes,4,rep,name=authReqs,proto3" json:"authReqs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *HandshakeResponse) Reset() {
*x = HandshakeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *HandshakeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HandshakeResponse) ProtoMessage() {}
func (x *HandshakeResponse) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HandshakeResponse.ProtoReflect.Descriptor instead.
func (*HandshakeResponse) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{2}
}
func (x *HandshakeResponse) GetResult() HandshakeResponse_Result {
if x != nil {
return x.Result
}
return HandshakeResponse_OK
}
func (x *HandshakeResponse) GetErrorMessage() string {
if x != nil {
return x.ErrorMessage
}
return ""
}
func (x *HandshakeResponse) GetJoinToken() string {
if x != nil {
return x.JoinToken
}
return ""
}
func (x *HandshakeResponse) GetAuthReqs() map[string]string {
if x != nil {
return x.AuthReqs
}
return nil
}
// JoinRequest is sent by a joining node when their wireguard interface
// is set up and is ready to join. It includes wireguard details such as
// the public key, etc., and an optional node name.
type JoinRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// wireguard: public key of joining node
Pubkey string `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
// wireguard: endpoint IP of joining node
EndpointIP string `protobuf:"bytes,2,opt,name=endpointIP,proto3" json:"endpointIP,omitempty"`
// wireguard: endpoint UDP port of joining node
EndpointPort int32 `protobuf:"varint,3,opt,name=endpointPort,proto3" json:"endpointPort,omitempty"`
// name of mesh to join
MeshName string `protobuf:"bytes,4,opt,name=meshName,proto3" json:"meshName,omitempty"`
// optional name of node
NodeName string `protobuf:"bytes,5,opt,name=nodeName,proto3" json:"nodeName,omitempty"`
}
func (x *JoinRequest) Reset() {
*x = JoinRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *JoinRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JoinRequest) ProtoMessage() {}
func (x *JoinRequest) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JoinRequest.ProtoReflect.Descriptor instead.
func (*JoinRequest) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{3}
}
func (x *JoinRequest) GetPubkey() string {
if x != nil {
return x.Pubkey
}
return ""
}
func (x *JoinRequest) GetEndpointIP() string {
if x != nil {
return x.EndpointIP
}
return ""
}
func (x *JoinRequest) GetEndpointPort() int32 {
if x != nil {
return x.EndpointPort
}
return 0
}
func (x *JoinRequest) GetMeshName() string {
if x != nil {
return x.MeshName
}
return ""
}
func (x *JoinRequest) GetNodeName() string {
if x != nil {
return x.NodeName
}
return ""
}
// JoinResponse indicates whether the JoinRequest has been accepted. If so,
// it includes an IP address for the joining node to assign to its
// wireguard interface, and additional data to fully join the mesh.
type JoinResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Result JoinResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=meshservice.JoinResponse_Result" json:"result,omitempty"`
ErrorMessage string `protobuf:"bytes,2,opt,name=errorMessage,proto3" json:"errorMessage,omitempty"`
// this will be the joining node's mesh IP
JoiningNodeMeshIP string `protobuf:"bytes,3,opt,name=joiningNodeMeshIP,proto3" json:"joiningNodeMeshIP,omitempty"`
// cidr of the mesh
MeshCidr string `protobuf:"bytes,4,opt,name=meshCidr,proto3" json:"meshCidr,omitempty"`
// creation time stamp
CreationTS int64 `protobuf:"varint,5,opt,name=creationTS,proto3" json:"creationTS,omitempty"`
// encryption key for serf gossip protocol
SerfEncryptionKey string `protobuf:"bytes,6,opt,name=serfEncryptionKey,proto3" json:"serfEncryptionKey,omitempty"`
// use serf LAN configuration (true) or WAN configuration (false)
SerfModeLAN bool `protobuf:"varint,7,opt,name=serfModeLAN,proto3" json:"serfModeLAN,omitempty"`
}
func (x *JoinResponse) Reset() {
*x = JoinResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *JoinResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JoinResponse) ProtoMessage() {}
func (x *JoinResponse) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JoinResponse.ProtoReflect.Descriptor instead.
func (*JoinResponse) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{4}
}
func (x *JoinResponse) GetResult() JoinResponse_Result {
if x != nil {
return x.Result
}
return JoinResponse_OK
}
func (x *JoinResponse) GetErrorMessage() string {
if x != nil {
return x.ErrorMessage
}
return ""
}
func (x *JoinResponse) GetJoiningNodeMeshIP() string {
if x != nil {
return x.JoiningNodeMeshIP
}
return ""
}
func (x *JoinResponse) GetMeshCidr() string {
if x != nil {
return x.MeshCidr
}
return ""
}
func (x *JoinResponse) GetCreationTS() int64 {
if x != nil {
return x.CreationTS
}
return 0
}
func (x *JoinResponse) GetSerfEncryptionKey() string {
if x != nil {
return x.SerfEncryptionKey
}
return ""
}
func (x *JoinResponse) GetSerfModeLAN() bool {
if x != nil {
return x.SerfModeLAN
}
return false
}
// Peer contains connection data for an individual
// Wireguard Peer
type Peer struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type Peer_AnnouncementType `protobuf:"varint,1,opt,name=type,proto3,enum=meshservice.Peer_AnnouncementType" json:"type,omitempty"`
Pubkey string `protobuf:"bytes,2,opt,name=pubkey,proto3" json:"pubkey,omitempty"` // public key
EndpointIP string `protobuf:"bytes,3,opt,name=endpointIP,proto3" json:"endpointIP,omitempty"` // endpoint
EndpointPort int32 `protobuf:"varint,4,opt,name=endpointPort,proto3" json:"endpointPort,omitempty"` // endpoint
MeshIP string `protobuf:"bytes,5,opt,name=meshIP,proto3" json:"meshIP,omitempty"` // internal mesh ip
}
func (x *Peer) Reset() {
*x = Peer{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Peer) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Peer) ProtoMessage() {}
func (x *Peer) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
func (*Peer) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{5}
}
func (x *Peer) GetType() Peer_AnnouncementType {
if x != nil {
return x.Type
}
return Peer_JOIN
}
func (x *Peer) GetPubkey() string {
if x != nil {
return x.Pubkey
}
return ""
}
func (x *Peer) GetEndpointIP() string {
if x != nil {
return x.EndpointIP
}
return ""
}
func (x *Peer) GetEndpointPort() int32 {
if x != nil {
return x.EndpointPort
}
return 0
}
func (x *Peer) GetMeshIP() string {
if x != nil {
return x.MeshIP
}
return ""
}
type RTTRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RequestedBy string `protobuf:"bytes,1,opt,name=requestedBy,proto3" json:"requestedBy,omitempty"` // node name
}
func (x *RTTRequest) Reset() {
*x = RTTRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RTTRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RTTRequest) ProtoMessage() {}
func (x *RTTRequest) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RTTRequest.ProtoReflect.Descriptor instead.
func (*RTTRequest) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{6}
}
func (x *RTTRequest) GetRequestedBy() string {
if x != nil {
return x.RequestedBy
}
return ""
}
type RTTResponseInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // node name
RttMsec int32 `protobuf:"varint,2,opt,name=rttMsec,proto3" json:"rttMsec,omitempty"`
}
func (x *RTTResponseInfo) Reset() {
*x = RTTResponseInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RTTResponseInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RTTResponseInfo) ProtoMessage() {}
func (x *RTTResponseInfo) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RTTResponseInfo.ProtoReflect.Descriptor instead.
func (*RTTResponseInfo) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{7}
}
func (x *RTTResponseInfo) GetNode() string {
if x != nil {
return x.Node
}
return ""
}
func (x *RTTResponseInfo) GetRttMsec() int32 {
if x != nil {
return x.RttMsec
}
return 0
}
type RTTResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // node name
Rtts []*RTTResponseInfo `protobuf:"bytes,2,rep,name=rtts,proto3" json:"rtts,omitempty"`
}
func (x *RTTResponse) Reset() {
*x = RTTResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_meshservice_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RTTResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RTTResponse) ProtoMessage() {}
func (x *RTTResponse) ProtoReflect() protoreflect.Message {
mi := &file_meshservice_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RTTResponse.ProtoReflect.Descriptor instead.
func (*RTTResponse) Descriptor() ([]byte, []int) {
return file_meshservice_proto_rawDescGZIP(), []int{8}
}
func (x *RTTResponse) GetNode() string {
if x != nil {
return x.Node
}
return ""
}
func (x *RTTResponse) GetRtts() []*RTTResponseInfo {
if x != nil {
return x.Rtts
}
return nil
}
var File_meshservice_proto protoreflect.FileDescriptor
var file_meshservice_proto_rawDesc = []byte{
0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2e, 0x0a, 0x10, 0x48, 0x61, 0x6e,
0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
0x08, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x11, 0x48, 0x61,
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x3d, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x25, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x61,
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x22,
0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6a, 0x6f, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6a, 0x6f, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x12, 0x48, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x73, 0x18, 0x04, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x41, 0x75,
0x74, 0x68, 0x52, 0x65, 0x71, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1b, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c,
0x74, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52,
0x4f, 0x52, 0x10, 0x01, 0x22, 0xa1, 0x01, 0x0a, 0x0b, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a,
0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x49, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x49, 0x50, 0x12, 0x22, 0x0a, 0x0c,
0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01,
0x28, 0x05, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x6f, 0x72, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08,
0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc3, 0x02, 0x0a, 0x0c, 0x4a, 0x6f, 0x69,
0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x72, 0x65, 0x73,
0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x68,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73,
0x75, 0x6c, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x6a, 0x6f, 0x69, 0x6e, 0x69,
0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x50, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x11, 0x6a, 0x6f, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d,
0x65, 0x73, 0x68, 0x49, 0x50, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x43, 0x69, 0x64,
0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x43, 0x69, 0x64,
0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x53, 0x18,
0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54,
0x53, 0x12, 0x2c, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x66, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x65,
0x72, 0x66, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12,
0x20, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x66, 0x4d, 0x6f, 0x64, 0x65, 0x4c, 0x41, 0x4e, 0x18, 0x07,
0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x66, 0x4d, 0x6f, 0x64, 0x65, 0x4c, 0x41,
0x4e, 0x22, 0x1b, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x06, 0x0a, 0x02, 0x4f,
0x4b, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x22, 0xdb,
0x01, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63,
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f,
0x69, 0x6e, 0x74, 0x49, 0x50, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x64,
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x49, 0x50, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f,
0x69, 0x6e, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x65,
0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d,
0x65, 0x73, 0x68, 0x49, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x73,
0x68, 0x49, 0x50, 0x22, 0x27, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x6d,
0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x4f, 0x49, 0x4e, 0x10,
0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x45, 0x41, 0x56, 0x45, 0x10, 0x01, 0x22, 0x2e, 0x0a, 0x0a,
0x52, 0x54, 0x54, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0x3f, 0x0a, 0x0f,
0x52, 0x54, 0x54, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x74, 0x74, 0x4d, 0x73, 0x65, 0x63, 0x18, 0x02,
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x72, 0x74, 0x74, 0x4d, 0x73, 0x65, 0x63, 0x22, 0x53, 0x0a,
0x0b, 0x52, 0x54, 0x54, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65,
0x12, 0x30, 0x0a, 0x04, 0x72, 0x74, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c,
0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x54, 0x54,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x72, 0x74,
0x74, 0x73, 0x32, 0xc3, 0x01, 0x0a, 0x04, 0x4d, 0x65, 0x73, 0x68, 0x12, 0x48, 0x0a, 0x05, 0x42,
0x65, 0x67, 0x69, 0x6e, 0x12, 0x1d, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x12, 0x18, 0x2e,
0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4a, 0x6f, 0x69, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x12, 0x2e,
0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x74,
0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
0x50, 0x65, 0x65, 0x72, 0x22, 0x00, 0x30, 0x01, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x73, 0x63, 0x68, 0x6d, 0x69, 0x64, 0x74, 0x37,
0x35, 0x2f, 0x77, 0x67, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x6d, 0x65, 0x73, 0x68, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_meshservice_proto_rawDescOnce sync.Once
file_meshservice_proto_rawDescData = file_meshservice_proto_rawDesc
)
func file_meshservice_proto_rawDescGZIP() []byte {
file_meshservice_proto_rawDescOnce.Do(func() {
file_meshservice_proto_rawDescData = protoimpl.X.CompressGZIP(file_meshservice_proto_rawDescData)
})
return file_meshservice_proto_rawDescData
}
var file_meshservice_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_meshservice_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_meshservice_proto_goTypes = []interface{}{
(HandshakeResponse_Result)(0), // 0: meshservice.HandshakeResponse.Result
(JoinResponse_Result)(0), // 1: meshservice.JoinResponse.Result
(Peer_AnnouncementType)(0), // 2: meshservice.Peer.AnnouncementType
(*Empty)(nil), // 3: meshservice.Empty
(*HandshakeRequest)(nil), // 4: meshservice.HandshakeRequest
(*HandshakeResponse)(nil), // 5: meshservice.HandshakeResponse
(*JoinRequest)(nil), // 6: meshservice.JoinRequest
(*JoinResponse)(nil), // 7: meshservice.JoinResponse
(*Peer)(nil), // 8: meshservice.Peer
(*RTTRequest)(nil), // 9: meshservice.RTTRequest
(*RTTResponseInfo)(nil), // 10: meshservice.RTTResponseInfo
(*RTTResponse)(nil), // 11: meshservice.RTTResponse
nil, // 12: meshservice.HandshakeResponse.AuthReqsEntry
}
var file_meshservice_proto_depIdxs = []int32{
0, // 0: meshservice.HandshakeResponse.result:type_name -> meshservice.HandshakeResponse.Result
12, // 1: meshservice.HandshakeResponse.authReqs:type_name -> meshservice.HandshakeResponse.AuthReqsEntry
1, // 2: meshservice.JoinResponse.result:type_name -> meshservice.JoinResponse.Result
2, // 3: meshservice.Peer.type:type_name -> meshservice.Peer.AnnouncementType
10, // 4: meshservice.RTTResponse.rtts:type_name -> meshservice.RTTResponseInfo
4, // 5: meshservice.Mesh.Begin:input_type -> meshservice.HandshakeRequest
6, // 6: meshservice.Mesh.Join:input_type -> meshservice.JoinRequest
3, // 7: meshservice.Mesh.Peers:input_type -> meshservice.Empty
5, // 8: meshservice.Mesh.Begin:output_type -> meshservice.HandshakeResponse
7, // 9: meshservice.Mesh.Join:output_type -> meshservice.JoinResponse
8, // 10: meshservice.Mesh.Peers:output_type -> meshservice.Peer
8, // [8:11] is the sub-list for method output_type
5, // [5:8] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_meshservice_proto_init() }
func file_meshservice_proto_init() {
if File_meshservice_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_meshservice_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HandshakeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HandshakeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*JoinRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*JoinResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Peer); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RTTRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RTTResponseInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_meshservice_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RTTResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_meshservice_proto_rawDesc,
NumEnums: 3,
NumMessages: 10,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_meshservice_proto_goTypes,
DependencyIndexes: file_meshservice_proto_depIdxs,
EnumInfos: file_meshservice_proto_enumTypes,
MessageInfos: file_meshservice_proto_msgTypes,
}.Build()
File_meshservice_proto = out.File
file_meshservice_proto_rawDesc = nil
file_meshservice_proto_goTypes = nil
file_meshservice_proto_depIdxs = nil
}
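// Example (sketch, not part of the generated code; all field values are
// hypothetical): a joining node fills a JoinRequest once its wireguard
// interface is up and submits it through the generated Mesh gRPC client:
//
//	req := &JoinRequest{
//		Pubkey:       "<base64 wireguard public key>",
//		EndpointIP:   "203.0.113.10",
//		EndpointPort: 51820,
//		MeshName:     "mesh1",
//		NodeName:     "node-a",
//	}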
|
package net.community.chest.rrd4j.client.test;
import java.io.BufferedReader;
import java.io.PrintStream;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import net.community.chest.apache.log4j.test.Log4jTester;
import net.community.chest.dom.DOMUtils;
import net.community.chest.rrd4j.client.jmx.AbstractMBeanRrdPoller;
import net.community.chest.rrd4j.client.jmx.http.HttpRrdPollerInstantiator;
import net.community.chest.rrd4j.common.core.RrdDefExt;
import net.community.chest.rrd4j.common.jmx.MBeanRrdDef;
import net.community.chest.test.TestBase;
import net.community.chest.util.logging.LoggerWrapper;
import net.community.chest.util.logging.factory.WrapperFactoryManager;
import org.w3c.dom.Document;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* @author <NAME>.
* @since Jan 10, 2008 2:29:44 PM
*/
public class MBeanPollerTest extends TestBase {
private MBeanPollerTest ()
{
super();
}
// args[0]=servlet URL, args[1]=configuration file, args[2]=log4j XML path (optional)
private static final int testJMXAccessor (
final PrintStream out, final BufferedReader in, final String ... args)
{
final String[] tstArgs=resolveTestParameters(out, in, args,
new String[] { "servlet URL", "configuration file path", "log4j XML path" });
final String urlArg=((null == tstArgs) || (tstArgs.length <= 0)) ? null : tstArgs[0],
xmlPath=((null == tstArgs) || (tstArgs.length <= 1)) ? null : tstArgs[1],
log4jPath=((null == tstArgs) || (tstArgs.length <= 2)) ? null : tstArgs[2];
if ((null == urlArg) || (urlArg.length() <= 0)
|| (null == xmlPath) || (xmlPath.length() <= 0))
return (-1);
if (isQuit(urlArg) || isQuit(xmlPath) || isQuit(log4jPath))
return 0;
if ((log4jPath != null) && (log4jPath.length() > 0))
Log4jTester.log4jInit(out, log4jPath);
try
{
final Document doc=DOMUtils.loadDocument(xmlPath);
final Collection<? extends MBeanRrdDef> defs=MBeanRrdDef.readDefinitions(doc);
final Map<String,? extends Collection<? extends MBeanRrdDef>> cm=RrdDefExt.checkDuplicatePaths(defs);
if ((cm != null) && (cm.size() > 0))
throw new IllegalStateException("Duplicate paths found");
final HttpRrdPollerInstantiator inst=new HttpRrdPollerInstantiator(new URI(urlArg));
final LoggerWrapper log=WrapperFactoryManager.getLogger(MBeanPollerTest.class);
final Collection<? extends Map.Entry<String,? extends AbstractMBeanRrdPoller>> threads=
inst.start(defs, null, log);
final int numThreads=
(null == threads) ? 0 : threads.size();
if (numThreads <= 0)
throw new IllegalStateException("No threads created");
log.info("started " + numThreads + " threads");
for ( ; ; )
{
final String ans=getval(out, in, "(Q)uit");
if (isQuit(ans)) break;
}
for (final Map.Entry<String,? extends AbstractMBeanRrdPoller> te : threads)
{
final AbstractMBeanRrdPoller t=(null == te) ? null : te.getValue();
if (t != null)
{
try
{
t.close();
log.info("stopped thread=" + te.getKey());
}
catch(Exception e)
{
System.err.println(e.getClass().getName() + " while stopping thread=" + te.getKey() + ": " + e.getMessage());
}
}
}
return 0;
}
catch(Exception e)
{
System.err.println(e.getClass().getName() + ": " + e.getMessage());
return (-1);
}
}
//////////////////////////////////////////////////////////////////////////
public static void main (String[] args)
{
final BufferedReader in=getStdin();
// final int nErr=testResourceDownloader(System.out, in, args);
// final int nErr=testJMXServlet(System.out, in, args);
final int nErr=testJMXAccessor(System.out, in, args);
if (nErr != 0)
System.err.println("test failed (err=" + nErr + ")");
else
System.out.println("OK");
}
}
|
# import random module
import random
# Function to generate a random 8-digit number
def generateRandomNumber():
# generate a random 8-digit number
random_number = random.randint(10000000, 99999999)
return random_number
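# Note: random.randint is inclusive on both ends, so the result always has
# exactly 8 digits. For reproducible output (e.g. in tests), seed the
# generator first with random.seed(<fixed value>).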
# Call the function
random_number = generateRandomNumber()
# Print the generated number
print("Generated Random Number is:", random_number) |
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
if [[ -z "${CREDENTIALS}" ]]; then
CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account
fi
if [[ -z "${STAGING_BUCKET_V2}" ]]; then
echo "Need to set STAGING_BUCKET_V2 environment variable"
exit 1
fi
# work from the git root directory
pushd "$(dirname "$0")/../../"
# install docuploader package
python3 -m pip install gcp-docuploader
# compile all packages
mvn clean install -B -q -DskipTests=true
export NAME=google-cloud-phishingprotection
export VERSION=$(grep ${NAME}: versions.txt | cut -d: -f3)
# V3 generates docfx yml from javadoc
# generate yml
mvn clean site -B -q -P docFX
# copy README to docfx-yml dir and rename index.md
cp README.md target/docfx-yml/index.md
# copy CHANGELOG to docfx-yml dir and rename history.md
cp CHANGELOG.md target/docfx-yml/history.md
pushd target/docfx-yml
# create metadata
python3 -m docuploader create-metadata \
--name ${NAME} \
--version ${VERSION} \
--xrefs devsite://java/gax \
--xrefs devsite://java/google-cloud-core \
--xrefs devsite://java/api-common \
--xrefs devsite://java/proto-google-common-protos \
--xrefs devsite://java/google-api-client \
--xrefs devsite://java/google-http-client \
--xrefs devsite://java/protobuf \
--language java
# upload yml to production bucket
python3 -m docuploader upload . \
--credentials ${CREDENTIALS} \
--staging-bucket ${STAGING_BUCKET_V2} \
--destination-prefix docfx
|
<reponame>jaydeeps-sacumen/prisma-cloud-iac-scan-plugin
package org.jenkinsci.plugins.shell.prismacloud.dto;
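/**
 * DTO describing a single scan finding (severity, rule, affected file and
 * description), apparently as reported by a Prisma Cloud IaC scan.
 */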
public class Issue {
private String severity;
private String name;
private String file;
private String rule;
private String desc;
public String getSeverity() {
return severity;
}
public void setSeverity(String severity) {
this.severity = severity;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getFile() {
return file;
}
public void setFile(String file) {
this.file = file;
}
public String getRule() {
return rule;
}
public void setRule(String rule) {
this.rule = rule;
}
public String getDesc() {
return desc;
}
public void setDesc(String desc) {
this.desc = desc;
}
}
|
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class AppTest {
@Test
public void getName() {
assertThat(new App().getName()).isEqualTo("Hello");
}
} |
#!/bin/sh
set -e
set -u
set -o pipefail
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/KYCForJapan/KYCForJapan.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/KYCForJapan/KYCForJapan.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
#!/bin/bash
function cleanup()
{
local pids=`jobs -p`
if [[ "${pids}" != "" ]]; then
kill ${pids} >/dev/null 2>/dev/null
fi
}
service="all"
if [[ "$1" != "" ]];then
service=$1
fi
trap cleanup EXIT
if [[ "$1" == "bash" ]];then
bash
else
python kubeops.py start ${service}
fi |
<filename>src/context/ThemeContext.js<gh_stars>0
import createDataContext from './createDataContext';
const themeContext = (state, action) => {
switch (action.type) {
case 'TOGGLE_THEME':
return {
...state,
currentTheme: action.payload.oppositeTheme,
};
default:
console.log(`⚠️ Warning! Action ${action.type} not found!`);
return state;
}
};
// Get their theme preference from localStorage (currently an unimplemented stub)
const fetchThemePreference = dispatch => async () => {};
const toggleTheme = dispatch => theme => {
const oppositeTheme = theme === 'DARK' ? 'LIGHT' : 'DARK';
window.localStorage.setItem('currentTheme', oppositeTheme);
dispatch({
type: 'TOGGLE_THEME',
payload: { oppositeTheme },
});
};
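// Example (hypothetical component usage): a consumer of Context would call
// toggleTheme(state.currentTheme) to flip between 'DARK' and 'LIGHT' and
// persist the choice to localStorage.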
export const { Provider, Context } = createDataContext(
themeContext,
{
fetchThemePreference,
toggleTheme,
},
{
currentTheme: window.localStorage.getItem('currentTheme') || 'LIGHT',
},
);
|
////////////////////////////////////////////////////////
// Intro //
////////////////////////////////////////////////////////
var bunyan = require('bunyan');
var log = bunyan.createLogger({
src: true,
name: 'life',
streams: [{
type: 'rotating-file',
path: './log',
count: 100
}, {
stream: process.stdout
}]
});
log.level(bunyan.INFO);
var express = require('express');
var bodyParser = require('body-parser');
var app = express();
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({extended: true}));
var uuidGen = require('node-uuid');
var sugar = require('sugar');
////////////////////////////////////////////////////////
// Mod //
////////////////////////////////////////////////////////
// Modifier of any sort. Usually an adpositional phrase.
function Mod(tag, value) {
// A tag, usually a preposition, for this modifier.
//
// Always a string. The null string is interpreted to
// mean "direct object".
this.tag = tag;
// The value of the modifier. Can be rich content.
this.value = value;
}
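// e.g. new Mod('with', 'a spoon').toString() === 'with a spoon',
// while a null-string tag yields just the value: new Mod('', 'the door').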
// Factory method to load a mod from json
function loadMod(json) {
return new Mod(json.tag, json.value);
}
Mod.prototype.toString = function() {
return (this.tag ? this.tag + ' ' : '') + this.value.toString();
}
Mod.prototype.toJson = function() {
return {
tag: this.tag,
value: this.value
};
}
Mod.prototype.toHtml = function() {
return this.toString();
}
////////////////////////////////////////////////////////
// Fact //
////////////////////////////////////////////////////////
function Fact(subject, verb, mods, uuid) {
this.verb = verb;
this.subject = subject;
this.mods = mods;
this.uuid = uuid || uuidGen.v4();
};
// Factory method to load a fact from json.
function loadFact(json) {
return new Fact(json.subject, json.verb, json.mods.map(loadMod), json.uuid);
}
Fact.prototype.toString = function() {
var modString = this.mods.length === 0 ? '' : ' ' + this.mods.map(function(mod) {
return mod.toString();
}).join(' ');
return this.subject + ' ' + this.verb + modString + '.';
}
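// e.g. new Fact('our hero', 'awoke', [new Mod('at', 'dawn')]).toString()
// === 'our hero awoke at dawn.'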
Fact.prototype.toJson = function() {
return {
subject: this.subject,
verb: this.verb,
mods: this.mods.map(function(mod) { return mod.toJson(); }),
uuid: this.uuid
};
}
Fact.prototype.deleteHtml = function() {
return '' +
'<form action="/delete" method="POST" class="delete-form">\n' +
'<input class="delete-uuid" type="hidden" name="uuid" value="' + this.uuid + '" />\n' +
'<input class="delete-button" type="submit" value="X" />\n' +
'</form>'
}
Fact.prototype.toHtml = function() {
return '<li>' + this.deleteHtml() + this.toString() + '</li>\n';
}
////////////////////////////////////////////////////////
// Facts //
////////////////////////////////////////////////////////
function Facts(facts) {
this.facts = facts || [];
}
// Factory method to load facts from json
function loadFacts(json) {
return new Facts(json.map(loadFact));
}
Facts.prototype.push = function(subject, verb, mods) {
this.facts.push(new Fact(subject, verb, mods));
};
Facts.prototype.remove = function(uuid) {
var index = this.facts.findIndex(function(fact, index, facts) {
return uuid == fact.uuid;
});
if ( index >= 0 ) {
this.facts.splice(index, 1);
}
}
Facts.prototype.toString = function() {
return this.facts.join('\n');
}
Facts.prototype.toJson = function() {
return this.facts.map(function(fact) {
return fact.toJson();
});
}
Facts.prototype.toHtml = function() {
var elems = this.facts.map(function(fact) {
return fact.toHtml();
});
return '<ul>\n' + elems.join('') + '</ul>\n';
}
////////////////////////////////////////////////////////
// persistence //
////////////////////////////////////////////////////////
var storeFile = './store';
var fs = require('fs');
// loads the fact store from storeFile, and passes the
// Facts object to the callback cb. async.
function loadStore(cb) {
fs.readFile(storeFile, function(err, data) {
if ( err ) {
log.warn(err, 'no fact store in ' + storeFile);
cb(new Facts());
}
else {
cb(loadFacts(JSON.parse(data.toString())));
}
});
}
// writes the fact store to disk. async.
function writeStore(facts, cb) {
fs.writeFile(storeFile, JSON.stringify(facts.toJson()), function(err) {
if ( err ) {
log.error(err, 'couldn\'t save fact store');
cb();
}
else {
log.info('fact store saved to ' + storeFile);
cb();
}
});
}
////////////////////////////////////////////////////////
// main html //
////////////////////////////////////////////////////////
function mainHtml(facts) {
return '' +
'<html>\n' +
'<head>\n' +
'<meta name="viewport" content="width=device-width" />\n' +
'<script src="/jquery-3.1.0.js"></script>\n' +
'<script src="/main.js"></script>\n' +
'<link rel="stylesheet" type="text/css" href="main.css" />\n' +
'</head>\n' +
'<body>\n' +
'<p>Hallo!</p>\n' +
addFactoidHtml() +
'<p>Here\'s your life story up \'till now:</p>\n' +
facts.toHtml() +
'</body>\n' +
'</html>\n';
}
function addFactoidHtml() {
return '' +
'<form id="add-form" action="/" method="POST">\n' +
'<input class="add-line" id="subject" type="text" name="subject" placeholder="our hero" required autocapitalize="none"/>\n' +
'<input class="add-line" id="verb" type="text" name="verb" placeholder="awoke" required autocapitalize="none" />\n' +
'<br />\n' +
'<div id="mods"></div>\n' +
'<input class="add-line" id="add" type="submit" value="Add" />\n' +
'</form>\n'
}
function mainJson(facts) {
return facts.toJson();
}
////////////////////////////////////////////////////////
// transform //
////////////////////////////////////////////////////////
function transform(value) {
var date = new sugar.Date(value);
log.info(date.isValid());
if ( date.isValid().raw ) {
log.info(true);
return date.full().raw;
}
else {
log.info(false);
return value;
}
}
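// e.g. transform('next friday') yields Sugar's full date string for that day,
// while a non-date value such as 'a spoon' is returned unchanged.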
////////////////////////////////////////////////////////
// main //
////////////////////////////////////////////////////////
function main() {
loadStore(function(facts) {
log.info('loaded facts', facts.toString());
app.get('/', function(req, res) {
res.send(mainHtml(facts));
});
app.get('/state', function(req, res) {
res.send(JSON.stringify(mainJson(facts)));
});
app.post('/add', function(req, res) {
facts.push(req.body.subject, req.body.verb, (req.body.mods ? req.body.mods : []).map(loadMod));
// there's obviously a race condition here, if
// another request comes in while we're writing
// to the file. we should queue up requests that
// come in while we're writing. better yet, we
// should use an actual database or something.
writeStore(facts, function() {
res.send(JSON.stringify({ok: true}));
});
});
app.post('/delete', function(req, res) {
log.info(req.body);
facts.remove(req.body.uuid);
// see /add for race condition disclaimer
writeStore(facts, function() {
res.send(JSON.stringify({ok: true}));
});
});
app.post('/transform', function(req, res) {
log.info('transforming ' + req.body.id + ': ' + req.body.value);
res.send(JSON.stringify({id: req.body.id, value: transform(req.body.value)}));
});
app.use(express.static('public'));
app.listen(3000, function() {
log.info('beep... beep...');
});
});
};
main();
|
<reponame>AIPHES/ecml-pkdd-2019-J3R-explainable-recommender
package net.librec.spark.math.structure.distributed
/**
* Created by Administrator on 2017-8-26.
*/
class DistributedMatrixLike {
}
|
autoload colors && colors
# cheers, @ehrenmurdick
# http://github.com/ehrenmurdick/config/blob/master/zsh/prompt.zsh
git=`which git`
git_current_branch() {
branch=("$($git symbolic-ref HEAD 2>/dev/null | awk -F/ {'print $NF'})")
if [[ $branch != "" ]]; then
remote=("$($git config branch.$branch.remote | tr -d '\n')")
if [[ $remote != "" ]]; then
echo " on $(color_value $remote/$branch cyan)"
else
echo " on $(color_value $branch cyan)"
fi
else
hash=("$($git rev-parse --short HEAD)")
echo " on $(color_value $hash yellow)"
fi
}
git_stashes() {
stash=${$($git stash list 2>/dev/null | wc -l | tr -d " ")}
if [[ $stash == "0" ]]
then
echo ""
else
echo " {$(color_value $stash cyan)}"
fi
}
color_value() {
if [[ $1 == "0" ]]; then
echo $1
else
echo %{$fg_bold[$2]%}$1%{$reset_color%}
fi
}
git_untracked_changed_staged() {
git_status=("${(f)$($git status --porcelain --untracked-files=all 2>/dev/null | cut -c1-2)}")
untracked=0
changed=0
staged=0
for line ($git_status) {
if [[ $line =~ "[MADRC][MD ]" ]]; then
(( staged = $staged + 1 ))
fi
if [[ $line =~ "\?\?" ]]; then
(( untracked = $untracked + 1 ))
elif [[ $line =~ "[MADRC ][MD]" ]]; then
(( changed = $changed + 1 ))
fi
}
if [[ $untracked == 0 && $changed == 0 && $staged == 0 ]];then
echo ""
else
echo " $(color_value $untracked red):$(color_value $changed red):$(color_value $staged red)"
fi
}
git_dirty() {
st=$($git status 2>/dev/null | wc -l | tr -d " ")
if [[ $st == "0" ]]
then
echo ""
else
echo "$(git_current_branch)$(git_stashes)$(git_untracked_changed_staged)$(git_commits)"
fi
}
git_commits() {
branch=("$($git symbolic-ref HEAD 2>/dev/null | awk -F/ {'print $NF'})")
remote=("$($git config branch.$branch.remote | tr -d '\n')")
if [[ $remote == "" ]]
then
echo ""
else
behind=${$($git rev-list --left-right --count @{upstream}...$branch | awk {'print $1'})}
ahead=${$($git rev-list --left-right --count @{upstream}...$branch | awk {'print $2'})}
if [[ $ahead == "0" && $behind == "0" ]]
then
echo ""
else
echo -n " ["
if [[ $behind != "0" ]]
then
echo -n "-$(color_value $behind cyan)"
if [[ $ahead != "0" ]]
then
echo -n "/"
fi
fi
if [[ $ahead != "0" ]]
then
echo -n "+$(color_value $ahead cyan)"
fi
echo -n "]"
fi
fi
}
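# Example output: " [-2/+1]" when the branch is 2 commits behind and
# 1 commit ahead of its upstream.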
location() {
# echo "$(color_value $USERNAME@$HOST cyan)"
if [[ $VAULTED_ENV == "" ]]
then
echo "$(color_value $USERNAME cyan)"
else
echo "$(color_value $USERNAME cyan):$(color_value $VAULTED_ENV red)"
fi
}
directory_name() {
echo "$(color_value $PWD green)"
}
export PS1=$'[$(location):$(directory_name)]$(git_dirty)\n$ '
|
var Metrix = require("./index.js");
var assert = require('assert');
// Constructor
assert.equal(typeof new Metrix(),"object","default constructor");
assert.throws(()=>{new Metrix("udp")},Error);
assert.throws(()=>{new Metrix("udp4")},Error);
assert.throws(()=>{new Metrix("http")},Error);
assert.equal(typeof new Metrix("udp://"),"object");
assert.equal(typeof new Metrix("udp://127.0.0.1"),"object");
assert.equal(typeof new Metrix("udp://127.0.0.1:8094"),"object");
var metrix = new Metrix();
// Test Measurement
assert.equal(metrix.line("M"),"M");
assert.equal(metrix.line("M "),"M\\ ");
assert.equal(metrix.line("M ,"),"M\\ \\,");
assert.equal(metrix.line("M$"),false); // $ is not allowed
assert.equal(metrix.line("M!"),false); // ! is not allowed
assert.equal(metrix.line("M["),false); // [] is not allowed
assert.equal(metrix.line("M]"),false); // [] is not allowed
assert.equal(metrix.line("M\"\""),false); // " is not allowed
assert.equal(metrix.line("Memory Failure"),"Memory\\ Failure");
// Test Tags
// Tag keys and tag values must escape commas, spaces, and equal signs. Use a backslash (\) to escape characters, for example: \ and \,. All tag values are stored as strings and should not be surrounded in quotes.
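// e.g. a tag key "rack nr" with value "first" serializes as: rack\ nr=first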
assert.equal(metrix.line("M",null),"M");
assert.equal(metrix.line("M",null,null),"M");
assert.equal(metrix.line("M",{},null),"M");
assert.equal(metrix.line("M",{domain:'domain1'},null),"M,domain=domain1");
assert.equal(metrix.line("M",{domain:'domain1',cpu:123},null),"M,domain=domain1,cpu=123");
assert.equal(metrix.line("M",{domain:'domain1',cpu:123,rack:'first'},null),"M,domain=domain1,cpu=123,rack=first");
assert.equal(metrix.line("M",{domain:'domain1',cpu:123,"rack nr":'first'},null),"M,domain=domain1,cpu=123,rack\\ nr=first");
assert.equal(metrix.line("M",{domain:'domain1',cpu:123,"rack nr & values":'first'},null),false); // not allowed char inside tag key
assert.equal(metrix.line("M",{domain:'domain1',cpu:123,"rack nr , values":'first'},null),"M,domain=domain1,cpu=123,rack\\ nr\\ \\,\\ values=first");
assert.equal(metrix.line("M",{"domain=1":'domain1'},null),"M,domain\\=1=domain1");
assert.equal(metrix.line("M",{"domain=, 1":'domain1'},null),"M,domain\\=\\,\\ 1=domain1");
assert.equal(metrix.line("M",{"domain$":'domain1'},null),false);
assert.equal(metrix.line("M",{"$domain":'domain1'},null),false);
assert.equal(metrix.line("M",{" domain":'domain1'},null),"M,\\ domain=domain1");
// Fields are key-value metrics associated with the measurement. Every line must have at least one field. Multiple fields must be separated with commas and not spaces.
// Field keys are always strings and follow the same syntactical rules as described above for tag keys and values. Field values can be one of four types. The first value written for a given field on a given measurement defines the type of that field for all series under that measurement.
// Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted (e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i. If they do not they will be written as floats.
// Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
// Boolean values indicate true or false. Valid boolean strings are (t, T, true, True, TRUE, f, F, false, False and FALSE).
// Strings are text values. All string values must be surrounded in double-quotes ". If the string contains a double-quote, it must be escaped with a backslash, e.g. \".
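// Note: as the assertions below show, this client writes numbers as plain
// floats (no trailing "i" integer marker) and wraps strings in double-quotes.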
// Field tests with single value without key
assert.equal(metrix.line("M",{},{}),"M");
assert.equal(metrix.line("M",{},null),"M");
assert.equal(metrix.line("M",{},1),"M value=1");
assert.equal(metrix.line("M",{},3.2),"M value=3.2");
assert.equal(metrix.line("M",{},"a"),"M value=\"a\"");
assert.equal(metrix.line("M",{},"this is a long value"),"M value=\"this is a long value\"");
// Field tests
assert.equal(metrix.line("M",{},{cpu:1,memory:2}),"M cpu=1,memory=2");
assert.equal(metrix.line("M",{},{cpu:1,memory:2}),"M cpu=1,memory=2");
assert.equal(metrix.line("M",{},{cpu:"1",memory:"2"}),"M cpu=\"1\",memory=\"2\"");
assert.equal(metrix.line("M",{},{cpu:1,memory:2}),"M cpu=1,memory=2");
assert.equal(metrix.line("M",{},{cpu:1}),"M cpu=1");
assert.equal(metrix.line("M",{},{"cpu name":1}),"M cpu\\ name=1");
assert.equal(metrix.line("M",{},{"cpu name is very long":1}),"M cpu\\ name\\ is\\ very\\ long=1");
assert.equal(metrix.line("M",{},{"cpu $":1}),false);
assert.equal(metrix.line("M",{},{"cpu.last":1}),"M cpu.last=1");
assert.equal(metrix.line("M",{},{"cpu.last.portion":1}),"M cpu.last.portion=1");
assert.equal(metrix.line("M",{},{"cpu=last":1}),"M cpu\\=last=1");
assert.equal(metrix.line("M",{},{name:"<NAME>"}),"M name=\"<NAME>\"");
assert.equal(metrix.line("M",{},{name:"\"good\" job"}),"M name=\"\\\"good\\\" job\"");
console.log("good job!");
|
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${UNLOCALIZED_RESOURCES_FOLDER_PATH+x} ]; then
# If UNLOCALIZED_RESOURCES_FOLDER_PATH is not set, then there's nowhere for us to copy
# resources to, so exit 0 (signalling the script phase was successful).
exit 0
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
case "${TARGETED_DEVICE_FAMILY:-}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
4)
TARGET_DEVICE_ARGS="--target-device watch"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" || true
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH" || true
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${PODS_ROOT}/../../XGLib/Assets/编组 5@2x.png"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${PODS_ROOT}/../../XGLib/Assets/编组 5@2x.png"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "${XCASSET_FILES:-}" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find -L "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}"* ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
if [ -z ${ASSETCATALOG_COMPILER_APPICON_NAME+x} ]; then
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
else
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" --app-icon "${ASSETCATALOG_COMPILER_APPICON_NAME}" --output-partial-info-plist "${TARGET_TEMP_DIR}/assetcatalog_generated_info_cocoapods.plist"
fi
fi
|
<html>
<head>
<title>Message Form</title>
<style>
input, textarea {
width: 300px;
margin: 5px;
padding: 5px;
}
</style>
</head>
<body>
<form>
<input type="text" placeholder="Username" id="username" />
<input type="password" placeholder="Password" id="password" />
<textarea placeholder="Message" id="message"></textarea>
<input type="submit" value="Submit" onclick="saveForm()" />
</form>
<script>
function saveForm() {
const username = document.getElementById('username').value;
const password = document.getElementById('password').value;
const message = document.getElementById('message').value;
localStorage.setItem("username", username);
localStorage.setItem("password", password);
localStorage.setItem("message", message);
}
</script>
</body>
</html> |
def transform_text_to_list(text):
"""Transform a given text string into a list of words."""
# remove punctuation
punc_removed = ''.join(c for c in text if c not in "!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~")
# split on spaces
words_list = punc_removed.split()
return words_list
if __name__ == "__main__":
text = "This is a text string"
print(transform_text_to_list(text)) |
<gh_stars>0
#include "statistics.h"
#include <adiar/internal/pred.h>
#include <adiar/internal/levelized_priority_queue.h>
#include <adiar/internal/reduce.h>
namespace adiar
{
// Define the available function
stats_t adiar_stats()
{
return {
stats_equality,
stats_priority_queue,
stats_reduce,
};
}
// Helper functions for pretty printing (UNIX)
std::ostream& bold_on(std::ostream& os) { return os << "\e[1m"; }
std::ostream& bold_off(std::ostream& os) { return os << "\e[0m"; }
std::ostream& percent(std::ostream& os) { return os << "%"; }
std::ostream& indent(std::ostream& os) { return os << " "; }
std::ostream& endl(std::ostream& os) { return os << std::endl; }
double compute_percent(size_t s, size_t of) { return (static_cast<double>(s) / static_cast<double>(of)) * 100; }
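// e.g. compute_percent(1, 4) == 25.0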
void adiar_printstat(std::ostream &o)
{
o << bold_on << "Adiar statistics" << bold_off << endl;
o << endl;
#ifndef ADIAR_STATS
o << indent << "Not gathered; please compile with 'ADIAR_STATS' and/or 'ADIAR_STATS_EXTRA'." << endl;
#else
o << std::fixed << std::setprecision(2);
o << indent << bold_on << "Equality checking" << bold_off << " (trace)" << endl;
o << indent << indent << "same file " << indent << stats_equality.exit_on_same_file << endl;
o << indent << indent << "node count " << indent << stats_equality.exit_on_nodecount << endl;
o << indent << indent << "var count " << indent << stats_equality.exit_on_varcount << endl;
o << indent << indent << "levels mismatch " << indent << stats_equality.exit_on_levels_mismatch << endl;
o << endl;
o << indent << indent << "O(sort(N)) algorithm " << endl;
o << indent << indent << indent << "runs " << stats_equality.slow_check.runs << endl;
o << indent << indent << indent << "root " << stats_equality.slow_check.exit_on_root << endl;
o << indent << indent << indent << "requests on a level " << stats_equality.slow_check.exit_on_processed_on_level << endl;
o << indent << indent << indent << "child violation " << stats_equality.slow_check.exit_on_children << endl;
o << endl;
o << indent << indent << "O(N/B) algorithm" << endl;
o << indent << indent << indent << "runs " << stats_equality.fast_check.runs << endl;
o << indent << indent << indent << "node mismatch " << stats_equality.fast_check.exit_on_mismatch << endl;
o << endl;
#ifdef ADIAR_STATS_EXTRA
size_t total_pushes = stats_priority_queue.push_bucket + stats_priority_queue.push_overflow;
o << indent << bold_on << "Levelized Priority Queue" << bold_off << endl;
o << indent << indent << "pushes to bucket " << indent << stats_priority_queue.push_bucket
<< " = " << compute_percent(stats_priority_queue.push_bucket, total_pushes) << percent << endl;
o << indent << indent << "pushes to overflow " << indent << stats_priority_queue.push_overflow
<< " = " << compute_percent(stats_priority_queue.push_overflow, total_pushes) << percent << endl;
o << endl;
#endif
size_t total_arcs = stats_reduce.sum_node_arcs + stats_reduce.sum_sink_arcs;
o << indent << bold_on << "Reduce" << bold_off << endl;
o << indent << indent << "input size " << indent << total_arcs << " arcs = " << total_arcs / 2 << " nodes" << endl;
o << indent << indent << indent << "node arcs: " << indent
<< stats_reduce.sum_node_arcs << " = " << compute_percent(stats_reduce.sum_node_arcs, total_arcs) << percent << endl;
o << indent << indent << indent << "sink arcs: " << indent
<< stats_reduce.sum_sink_arcs << " = " << compute_percent(stats_reduce.sum_sink_arcs, total_arcs) << percent << endl;
#ifdef ADIAR_STATS_EXTRA
size_t total_removed = stats_reduce.removed_by_rule_1 + stats_reduce.removed_by_rule_2;
o << indent << indent << "nodes removed " << indent
<< total_removed << " = " << compute_percent(total_removed, total_arcs) << percent << endl;
if (total_removed > 0) {
o << indent << indent << indent << "rule 1: " << indent
<< stats_reduce.removed_by_rule_1 << " = " << compute_percent(stats_reduce.removed_by_rule_1, total_removed) << percent << endl;
o << indent << indent << indent << "rule 2: " << indent
<< stats_reduce.removed_by_rule_2 << " = " << compute_percent(stats_reduce.removed_by_rule_2, total_removed) << percent << endl;
}
#endif
o << endl;
#endif
}
void adiar_statsreset()
{
stats_equality = {};
stats_priority_queue = {};
stats_reduce = {};
}
}
|
/*
* Copyright (c) 2019 dmfs GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.smoothsync.smoothsetup.microfragments;
import android.content.Context;
import android.net.Uri;
import android.os.Bundle;
import android.os.Parcel;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import com.google.android.material.textfield.TextInputLayout;
import com.smoothsync.api.model.Provider;
import com.smoothsync.api.model.Service;
import com.smoothsync.smoothsetup.R;
import com.smoothsync.smoothsetup.model.Account;
import com.smoothsync.smoothsetup.model.BasicAccount;
import com.smoothsync.smoothsetup.services.providerservice.functions.ManualProviders;
import com.smoothsync.smoothsetup.utils.AccountDetails;
import com.smoothsync.smoothsetup.utils.AccountDetailsBox;
import com.smoothsync.smoothsetup.utils.AfterTextChangedFlowable;
import org.dmfs.android.microfragments.FragmentEnvironment;
import org.dmfs.android.microfragments.MicroFragment;
import org.dmfs.android.microfragments.MicroFragmentEnvironment;
import org.dmfs.android.microfragments.MicroFragmentHost;
import org.dmfs.android.microfragments.transitions.ForwardTransition;
import org.dmfs.android.microfragments.transitions.Swiped;
import org.dmfs.android.microwizard.MicroWizard;
import org.dmfs.android.microwizard.box.Box;
import org.dmfs.android.microwizard.box.Unboxed;
import org.dmfs.httpessentials.exceptions.ProtocolException;
import org.dmfs.httpessentials.executors.authorizing.UserCredentials;
import org.dmfs.httpessentials.types.Link;
import org.dmfs.iterators.EmptyIterator;
import org.dmfs.iterators.elementary.Seq;
import org.dmfs.jems.optional.Optional;
import org.dmfs.jems.optional.elementary.Present;
import org.dmfs.jems2.optional.Absent;
import org.dmfs.rfc5545.DateTime;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.security.KeyStore;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.fragment.app.Fragment;
import io.reactivex.rxjava3.android.schedulers.AndroidSchedulers;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.disposables.Disposable;
import io.reactivex.rxjava3.schedulers.Schedulers;
import static io.reactivex.rxjava3.core.Single.just;
import static org.dmfs.jems.optional.elementary.Absent.absent;
/**
* A {@link MicroFragment} that prompts the user to enter a password after {@link Account} has been chosen.
*
* @author <NAME>
*/
public final class ManualSetupSimpleMicroFragment implements MicroFragment<ManualSetupSimpleMicroFragment.Params>
{
private final static Pattern HOST_PATTERN = Pattern.compile("[a-zA-Z0-9.-]+(:\\d+)?(/.*)?");
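// Matches bare host[:port][/path] input such as "example.com:8443/dav",
// which is then prefixed with "https://" before being parsed as a URI.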
public final static Creator<ManualSetupSimpleMicroFragment> CREATOR = new Creator<ManualSetupSimpleMicroFragment>()
{
@Override
public ManualSetupSimpleMicroFragment createFromParcel(Parcel source)
{
boolean present = source.readInt() == 1;
return new ManualSetupSimpleMicroFragment(present ? new Present<>(source.readString()) : absent(),
new Unboxed<MicroWizard<AccountDetails>>(source).value());
}
@Override
public ManualSetupSimpleMicroFragment[] newArray(int size)
{
return new ManualSetupSimpleMicroFragment[size];
}
};
@NonNull
private final Optional<String> mUsername;
@NonNull
private final MicroWizard<AccountDetails> mNext;
public ManualSetupSimpleMicroFragment(@NonNull Optional<String> username, @NonNull MicroWizard<AccountDetails> next)
{
mUsername = username;
mNext = next;
}
@NonNull
@Override
public String title(@NonNull Context context)
{
return context.getString(R.string.smoothsync_manual_setup);
}
@Override
public boolean skipOnBack()
{
return false;
}
@NonNull
@Override
public Fragment fragment(@NonNull Context context, @NonNull MicroFragmentHost host)
{
return new ManualSetupFragment();
}
@NonNull
@Override
public Params parameter()
{
return new Params()
{
@Override
public Optional<String> username()
{
return mUsername;
}
@Override
public MicroWizard<AccountDetails> next()
{
return mNext;
}
};
}
@Override
public int describeContents()
{
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags)
{
dest.writeInt(mUsername.isPresent() ? 1 : 0);
if (mUsername.isPresent())
{
dest.writeString(mUsername.value());
}
dest.writeParcelable(mNext.boxed(), flags);
}
/**
* A Fragment that prompts the user for his or her password.
*/
public final static class ManualSetupFragment extends Fragment implements View.OnClickListener
{
private Params mParams;
private EditText mUri;
private EditText mUsername;
private EditText mPassword;
private MicroFragmentEnvironment<Params> mMicroFragmentEnvironment;
private Disposable mObserverDisposable;
private TextInputLayout mUriInputLayout;
@Override
public void onCreate(@Nullable Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
mMicroFragmentEnvironment = new FragmentEnvironment<>(this);
mParams = mMicroFragmentEnvironment.microFragment().parameter();
}
@Override
public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState)
{
View result = inflater.inflate(R.layout.smoothsetup_microfragment_manual_simple, container, false);
Button button = result.findViewById(R.id.button);
button.setOnClickListener(this);
button.setEnabled(false);
mUriInputLayout = result.findViewById(R.id.uri_input_layout);
mUriInputLayout.setErrorEnabled(true);
mUri = result.findViewById(R.id.url_input);
mUsername = result.findViewById(R.id.username_input);
mPassword = result.findViewById(R.id.password_input);
Optional<String> username = mMicroFragmentEnvironment.microFragment().parameter().username();
if (username.isPresent())
{
String u = username.value();
mUsername.setText(u);
int atIndex = u.indexOf('@');
if (atIndex >= 0)
{
mUri.setText(u.substring(atIndex + 1));
}
}
Flowable<Boolean> addressValid = new AfterTextChangedFlowable(mUri)
.debounce(item -> (item.isEmpty() ? Flowable.empty() : Flowable.timer(1, TimeUnit.SECONDS)))
.map(String::trim)
.onBackpressureLatest()
.switchMapSingle(urlString -> just(urlString)
.subscribeOn(Schedulers.io())
.filter(url -> !url.isEmpty())
.map(uri -> Uri.encode(uri, ":/.%"))
.map(uri -> HOST_PATTERN.matcher(uri).matches() ? URI.create("https://" + uri) : new URI(uri))
.map(this::ensureSecureScheme)
.map(uri -> InetAddress.getByName(uri.getHost()) != null)
.observeOn(AndroidSchedulers.mainThread())
.switchIfEmpty(just(false))
.doOnError(this::showError)
.doOnSuccess(next -> mUriInputLayout.post(() -> mUriInputLayout.setError(null)))
.onErrorReturnItem(false)
);
mObserverDisposable = Flowable.combineLatest(
new AfterTextChangedFlowable(mUsername),
new AfterTextChangedFlowable(mPassword),
addressValid,
(user, password, address) -> !user.isEmpty() && !password.isEmpty() && address)
.onErrorResumeWith(Flowable.just(false))
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
button::setEnabled,
error -> Log.e("ManualSetup", "Error while validating input", error));
return result;
}
private URI ensureSecureScheme(URI uri) throws InsecureSchemeException
{
if ("https".equals(uri.getScheme()))
{
return uri;
}
else
{
throw new InsecureSchemeException();
}
}
private void showError(Throwable error)
{
try
{
throw error;
}
catch (UnknownHostException e)
{
mUriInputLayout.setError(getString(R.string.smoothsetup_error_unknown_host));
}
catch (URISyntaxException e)
{
mUriInputLayout.setError(getString(R.string.smoothsetup_error_invalid_url));
}
catch (InsecureSchemeException e)
{
mUriInputLayout.setError(getString(R.string.smoothsetup_error_insecure_scheme));
}
catch (Throwable e)
{
mUriInputLayout.setError(getString(R.string.smoothsetup_error_other, e.toString()));
}
}
@Override
public void onDestroyView()
{
mObserverDisposable.dispose();
super.onDestroyView();
}
@Override
public void onClick(View v)
{
int id = v.getId();
try
{
if (id == R.id.button)
{
String uriString = mUri.getText().toString().trim();
if (uriString.isEmpty())
{
return;
}
URI uri = new URI(Uri.encode(uriString, ":/.%"));
if (uri.getScheme() != null && !"https".equals(uri.getScheme()))
{
return;
}
if (uri.getScheme() == null)
{
uri = new URI("https://" + uri.toString());
}
verify(uri, mUsername.getText().toString(), mPassword.getText().toString());
}
}
catch (ProtocolException | URISyntaxException e)
{
mMicroFragmentEnvironment.host()
.execute(getActivity(),
new Swiped(
new ForwardTransition<>(
new ErrorRetryMicroFragment(e.getMessage()))));
}
}
private void verify(URI uri, String username, String password) throws ProtocolException
{
// TODO: return a result
// verify entered password
mMicroFragmentEnvironment.host()
.execute(getActivity(),
new Swiped(new ForwardTransition<>(
mParams.next().microFragment(
getActivity(),
new AccountDetails()
{
@Override
public Account account()
{
return new BasicAccount(
mUsername.getText().toString(),
new Provider()
{
@Override
public String id()
{
return ManualProviders.PREFIX + uri.toString();
}
@Override
public String name()
{
return uri.getAuthority();
}
@Override
public String[] domains()
{
return new String[0];
}
@Override
public Iterator<Link> links()
{
return EmptyIterator.instance();
}
@Override
public Iterator<Service> services()
{
return new Seq<>(
new Service()
{
@Override
public String name()
{
return "Contacts";
}
@Override
public String serviceType()
{
return "carddav";
}
@Override
public URI uri()
{
return uri;
}
@Override
public org.dmfs.jems2.Optional<KeyStore> keyStore()
{
return new Absent<>();
}
},
new Service()
{
@Override
public String name()
{
return "Authentication";
}
@Override
public String serviceType()
{
return "com.smoothsync.authenticate";
}
@Override
public URI uri()
{
return uri;
}
@Override
public org.dmfs.jems2.Optional<KeyStore> keyStore()
{
return new Absent<>();
}
},
new Service()
{
@Override
public String name()
{
return "Calendars";
}
@Override
public String serviceType()
{
return "caldav";
}
@Override
public URI uri()
{
return uri;
}
@Override
public org.dmfs.jems2.Optional<KeyStore> keyStore()
{
return new Absent<>();
}
}
);
}
@Override
public DateTime lastModified()
{
return DateTime.now();
}
});
}
@Override
public UserCredentials credentials()
{
return new UserCredentials()
{
@Override
public CharSequence userName()
{
return username;
}
@Override
public CharSequence password()
{
return password;
}
};
}
@Override
public Bundle settings()
{
return Bundle.EMPTY;
}
@Override
public Box<AccountDetails> boxed()
{
return new AccountDetailsBox(this);
}
}))));
}
}
protected interface Params
{
Optional<String> username();
MicroWizard<AccountDetails> next();
}
private final static class InsecureSchemeException extends Exception
{
}
}
|
<filename>spec/services/nomis/api_spec.rb
require 'rails_helper'
RSpec.describe Nomis::Api do
subject { described_class.instance }
# Ensure that we have a new instance to prevent other specs from interfering
around do |ex|
Singleton.__init__(described_class)
ex.run
Singleton.__init__(described_class)
end
it 'is implicitly enabled if the api host is configured' do
expect(Rails.configuration).to receive(:prison_api_host).and_return(nil)
expect(described_class.enabled?).to be false
expect(Rails.configuration).to receive(:prison_api_host).and_return('http://example.com/')
expect(described_class.enabled?).to be true
end
it 'fails if code attempts to use the client when disabled' do
expect(described_class).to receive(:enabled?).and_return(false)
expect {
described_class.instance
}.to raise_error(Nomis::Error::Disabled, 'Nomis API is disabled')
end
describe 'lookup_active_prisoner', vcr: { cassette_name: :lookup_active_prisoner } do
let(:params) {
{
noms_id: 'G7244GR',
date_of_birth: Date.parse('1966-11-22')
}
}
let(:prisoner) { subject.lookup_active_prisoner(params) }
it 'returns a prisoner if the data matches' do
expect(prisoner).to be_kind_of(Nomis::Prisoner)
expect(prisoner.nomis_offender_id).to eq(1_502_035)
expect(prisoner.noms_id).to eq('G7244GR')
end
it 'returns NullPrisoner if the data does not match', vcr: { cassette_name: :lookup_active_prisoner_nomatch } do
params[:noms_id] = 'Z9999ZZ'
expect(prisoner).to be_instance_of(Nomis::NullPrisoner)
end
it 'returns NullPrisoner if an ApiError is raised', :expect_exception do
allow_any_instance_of(Nomis::Client).to receive(:get).and_raise(Nomis::APIError)
expect(prisoner).to be_instance_of(Nomis::NullPrisoner)
expect(prisoner).not_to be_api_call_successful
end
it 'logs the lookup result, api lookup time' do
prisoner
expect(PVB::Instrumentation.custom_log_items[:api]).to be > 1
expect(PVB::Instrumentation.custom_log_items[:valid_prisoner_lookup]).to be true
end
describe 'with no matching prisoner', vcr: { cassette_name: :lookup_active_prisoner_nomatch } do
before do
params[:noms_id] = 'Z9999ZZ'
end
it 'returns a NullPrisoner if the data does not match' do
expect(prisoner).to be_instance_of(Nomis::NullPrisoner)
end
it 'logs that the prisoner lookup was unsuccessful' do
prisoner
expect(PVB::Instrumentation.custom_log_items[:valid_prisoner_lookup]).to be false
end
end
end
describe '#lookup_prisoner_details' do
let(:prisoner_details) { described_class.instance.lookup_prisoner_details(noms_id: noms_id) }
context 'when found', vcr: { cassette_name: :lookup_prisoner_details } do
let(:noms_id) { 'G7244GR' }
it 'serialises the response into a prisoner' do
expect(prisoner_details).
to have_attributes(
given_name: "UDFSANAYE",
surname: "KURTEEN",
date_of_birth: Date.parse('1966-11-22'),
aliases: [],
gender: { 'code' => 'M', 'desc' => 'Male' },
convicted: true,
imprisonment_status: { "code" => "SENT03", "desc" => "Adult Imprisonment Without Option CJA03" },
iep_level: { "code" => "ENH", "desc" => "Enhanced" }
)
end
it 'instruments the request' do
prisoner_details
expect(PVB::Instrumentation.custom_log_items[:valid_prisoner_details_lookup]).to be true
end
end
context 'when an unknown prisoner', :expect_exception, vcr: { cassette_name: :lookup_prisoner_details_unknown_prisoner } do
let(:noms_id) { 'G999999' }
it { expect { prisoner_details }.to raise_error(Nomis::APIError) }
end
context 'when given an invalid nomis id', :expect_exception, vcr: { cassette_name: :lookup_offender_details_invalid_noms_id } do
let(:noms_id) { 'RUBBISH' }
it { expect { prisoner_details }.to raise_error(Nomis::APIError) }
end
end
describe '#lookup_prisoner_location' do
let(:establishment) { subject.lookup_prisoner_location(noms_id: noms_id) }
context 'when found', vcr: { cassette_name: :lookup_prisoner_location } do
let(:noms_id) { 'G7244GR' }
it 'returns a Location' do
expect(establishment).to be_valid
expect(establishment.code).to eq 'LEI'
end
it 'has the internal location' do
expect(establishment).to have_attributes(housing_location: instance_of(Nomis::HousingLocation))
expect(establishment.housing_location.description).to eq 'LEI-F-3-005'
end
end
context 'with an unknown offender', :expect_exception, vcr: { cassette_name: :lookup_prisoner_location_for_unknown_prisoner } do
let(:noms_id) { 'G999999' }
it { expect { establishment }.to raise_error(Nomis::APIError) }
end
context 'with an invalid nomis_id', :expect_exception, vcr: { cassette_name: :lookup_prisoner_location_for_bogus_prisoner } do
let(:noms_id) { 'BOGUS' }
it { expect { establishment }.to raise_error(Nomis::APIError) }
end
end
describe 'prisoner_visiting_availability', vcr: { cassette_name: :prisoner_visiting_availability } do
let(:params) {
{
offender_id: 1_502_035,
start_date: '2020-10-15',
end_date: '2020-10-25'
}
}
context 'when the prisoner has availability' do
subject { super().prisoner_visiting_availability(params) }
it 'returns availability info containing a list of available dates' do
expect(subject).to be_kind_of(Nomis::PrisonerAvailability)
expect(subject.dates.first).to eq(Date.parse('2020-10-15'))
end
it 'logs the number of available dates' do
expect(subject.dates.count).to eq(PVB::Instrumentation.custom_log_items[:prisoner_visiting_availability])
end
end
context 'when the prisoner has no availability' do
# This spec has to have a hard-coded date, as an offender MUST be unavailable on a specific date for it to
# pass. Unfortunately we are unable to use 'travel_to' and go to the past as the JWT token skew is too large. If this
# test needs updating, a new date will need to be chosen and the VCR cassette re-recorded.
let(:params) {
{
offender_id: 1_502_035,
start_date: Date.parse('2020-10-18'),
end_date: Date.parse('2020-10-18')
}
}
subject { super().prisoner_visiting_availability(params) }
it 'returns empty list of available dates if there is no availability', vcr: { cassette_name: :prisoner_visiting_availability_noavailability } do
expect(subject).to be_kind_of(Nomis::PrisonerAvailability)
expect(subject.dates).to be_empty
end
end
end
describe 'prisoner_visiting_detailed_availability', vcr: { cassette_name: :prisoner_visiting_detailed_availability } do
let(:slot1) { ConcreteSlot.new(2020, 10, 15, 10, 0, 11, 0) }
let(:slot2) { ConcreteSlot.new(2020, 10, 16, 10, 0, 11, 0) }
let(:slot3) { ConcreteSlot.new(2020, 10, 17, 10, 0, 11, 0) }
let(:params) do
{
offender_id: 1_502_035,
slots: [slot1, slot2, slot3]
}
end
subject { super().prisoner_visiting_detailed_availability(params) }
it 'returns availability info containing a list of available dates' do
expect(subject).to be_kind_of(Nomis::PrisonerDetailedAvailability)
expect(subject.dates.map(&:date)).
to contain_exactly(slot1.to_date, slot2.to_date, slot3.to_date)
end
it 'logs the number of available slots' do
subject
expect(PVB::Instrumentation.custom_log_items[:prisoner_visiting_availability]).to eq(3)
end
end
describe 'fetch_bookable_slots', vcr: { cassette_name: :fetch_bookable_slots } do
# There have been issues with the visit slots for Leeds in T3, so we have switched to using The Verne
# for this spec
let(:params) {
{
prison: instance_double(Prison, nomis_id: 'VEI'),
start_date: '2020-10-14',
end_date: '2020-10-24'
}
}
subject { super().fetch_bookable_slots(params) }
it 'returns an array of slots' do
expect(subject.first.time.iso8601).to eq("2020-10-14T14:00/16:00")
end
it 'logs the number of available slots' do
expect(subject.count).to eq(PVB::Instrumentation.custom_log_items[:slot_visiting_availability])
end
end
describe 'fetch_contact_list', vcr: { cassette_name: :fetch_contact_list } do
let(:params) do
{
offender_id: 1_502_035
}
end
let(:first_contact) do
Nomis::Contact.new(
id: 2_996_406,
given_name: 'AELAREET',
surname: 'ANTOINETTE',
date_of_birth: '1990-09-22',
gender: { code: "M", desc: "Male" },
active: true,
approved_visitor: true,
relationship_type: { code: "SON", desc: "Son" },
contact_type: {
code: "S",
desc: "Social/ Family"
},
restrictions: []
)
end
subject { super().fetch_contact_list(params) }
it 'returns an array of contacts' do
expect(subject).to have_exactly(27).items
end
it 'parses the contacts' do
expect(subject.map(&:id)).to include(first_contact.id)
end
end
end
|
#! /usr/bin/env bash
set -euo pipefail
# Args
nix_path=$1
config=$2
config_pwd=$3
shift
shift
shift
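# Example invocation (hypothetical script name and channel):
#   ./instantiate.sh "nixpkgs=channel:nixos-21.05" ./configuration.nix . --argstr system x86_64-linux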
# Building the command
command=(nix-instantiate --show-trace --expr '
{ system, configuration, ... }:
let
os = import <nixpkgs/nixos> { inherit system configuration; };
inherit (import <nixpkgs/lib>) concatStringsSep;
in {
substituters = concatStringsSep " " os.config.nix.binaryCaches;
trusted-public-keys = concatStringsSep " " os.config.nix.binaryCachePublicKeys;
drv_path = os.system.drvPath;
out_path = os.system;
}')
if [[ -f "$config" ]]; then
config=$(readlink -f "$config")
command+=(--argstr configuration "$config")
else
command+=(--arg configuration "$config")
fi
# add all extra CLI args as extra build arguments
command+=("$@")
# Setting the NIX_PATH
if [[ -n "$nix_path" && "$nix_path" != "-" ]]; then
export NIX_PATH=$nix_path
fi
# Changing directory
cd "$(readlink -f "$config_pwd")"
# Instantiate
echo "running (instantiating): ${NIX_PATH:+NIX_PATH=$NIX_PATH} ${command[*]@Q}" -A out_path >&2
"${command[@]}" -A out_path >/dev/null
# Evaluate some more details,
# relying on preceding "Instantiate" command to perform the instantiation,
# because `--eval` is required but doesn't instantiate for some reason.
echo "running (evaluating): ${NIX_PATH:+NIX_PATH=$NIX_PATH} ${command[*]@Q}" --eval --strict --json >&2
"${command[@]}" --eval --strict --json
|
<filename>src/main/java/com/supanadit/restsuite/model/RequestTypeModel.java<gh_stars>10-100
package com.supanadit.restsuite.model;
public class RequestTypeModel {
protected String name;
public RequestTypeModel(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public static RequestTypeModel GET() {
return new RequestTypeModel("GET");
}
public static RequestTypeModel POST() {
return new RequestTypeModel("POST");
}
public static RequestTypeModel DELETE() {
return new RequestTypeModel("DELETE");
}
public static RequestTypeModel PUT() {
return new RequestTypeModel("PUT");
}
}
|
#!/bin/bash
cd src/SpliceMap-src
make install CC="${CXX}" CFLAGS-64="${CXXFLAGS} -m64 -O3 -Wall"
mkdir -p "${PREFIX}/bin"
cp \
SpliceMap \
runSpliceMap \
sortsam \
nnrFilter \
neighborFilter \
uniqueJunctionFilter \
randomJunctionFilter \
wig2barwig \
colorJunction \
subseq \
findNovelJunctions \
statSpliceMap \
countsam \
amalgamateSAM \
precipitateSAM \
"${PREFIX}/bin/"
|
package main
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/gorilla/mux"
)
type Calculator struct {
leftOperand int
rightOperand int
}
func (c *Calculator) calculate(w http.ResponseWriter, r *http.Request) {
// calculate result using left operand, right operand
// and operation type
switch r.Method {
case http.MethodGet:
result := c.leftOperand + c.rightOperand
rr := json.NewEncoder(w).Encode(result)
if rr != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", rr)
os.Exit(1)
}
case http.MethodPost:
// take input from multiple sources
// and do math calculations
// use goroutines to distribute workload
// and scale application
default:
_, _ = fmt.Fprint(w, "Operation not supported\n")
}
}
func main() {
router := mux.NewRouter()
calculator := &Calculator{3, 2}
router.HandleFunc("/calculate", calculator.calculate).Methods("GET", "POST")
http.ListenAndServe(":8000", router)
} |
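The POST branch in the file above is left as comments. As a sketch only, assuming a hypothetical JSON body such as {"operands": [1, 2, 3, 4]} (the original does not specify a request format, so calcRequest and handlePost below are invented names), the handler could decode the payload and fan the summation out across two goroutines, echoing the "use goroutines to distribute workload" note:

// calcRequest is a hypothetical payload shape, not defined in the original.
type calcRequest struct {
	Operands []int `json:"operands"`
}

func (c *Calculator) handlePost(w http.ResponseWriter, r *http.Request) {
	var req calcRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Split the operands in two and sum each half in its own goroutine.
	mid := len(req.Operands) / 2
	partial := make(chan int, 2)
	for _, part := range [][]int{req.Operands[:mid], req.Operands[mid:]} {
		go func(nums []int) {
			sum := 0
			for _, n := range nums {
				sum += n
			}
			partial <- sum
		}(part)
	}
	total := <-partial + <-partial
	if err := json.NewEncoder(w).Encode(total); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

The channel is buffered to the number of workers, so both goroutines can send and finish even if the handler were to return early.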
// Method to convert celsius to fahrenheit
public static double celsiusToFahrenheit(double celsius) {
    double fahrenheit = (celsius * 9 / 5) + 32;
return fahrenheit;
} |
import re
def validate_email(email):
if len(email) > 7:
if re.match("^.+@([?)[a-zA-Z0-9-.]+.([a-zA-Z]{2,3}|[0-9]{1,3})(]?)$", email) != None:
return True
return False
email = input('Please enter your email: ')
if validate_email(email):
print('Valid email')
else:
print('Invalid email') |
def intersection(arr1, arr2)
arr1 & arr2
end
arr1 = [3, 7, 8, 10]
arr2 = [1, 5, 7, 8, 10]
puts intersection(arr1, arr2) |
/**
* Contains components that display images.
*/
package io.opensphere.controlpanels.component.image;
|
# CS with respect to the true image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./CS/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--usetrueasref \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
# CS with respect to the avg image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./CS/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
# CSDEB with respect to the true image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./CSDEB/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--usetrueasref \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
# CSDEB with respect to the avg image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./CSDEB/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
# OMP with respect to the true image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./OMP/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--usetrueasref \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
# OMP with respect to the avg image
python -m mrina.save_imgs --numsamples 100 \
--maindir ./ \
--recdir ./OMP/ \
--maskdir ./ \
--outputdir ./OUT/01_imgs/ \
--savetrue \
--savemask \
--saverec \
--savenoise \
--printlevel 1 \
--savelin \
--limits 0.0 184.2392788833003 -1.0776501893997192 1.0873665809631348 -1.146713376045227 1.4400959014892578 -1.2205644845962524 1.3197449445724487
|
#!/usr/bin/env python
"""Translate the Book-Crossing dataset to JSON.
This script takes the various Book-Crossing data files and write them out as
JSON. It removes users that have no ratings as well as ratings of books that do
not exist.
Attributes:
RATINGS (dict): A dictionary that stores information from all of
the rating actions taken by the users in the dataset. The
variables are as follows:
- user_id (int): A unique identifier for each user.
- book_id (int): A unique identifier for each book.
- rating (int): The user's rating for a book,
from 1 to 10. None if an implicit rating.
- implicit (bool): True if the rating is "implicit", that is, just
an interaction with the book instead of a numeric rating.
BOOKS (dict): A dictionary that stores information about all the books in
the dataset. The variables are as follows:
- book_id (str): A unique identifier for each book.
- title (str): The title of the book.
- author (str): The author of the book.
- year (int): The year the book was published.
- publisher (str): The publisher of the book.
USERS (dict): A dictionary that stores information about the users. The
variables are as follows:
- user_id (int): A unique identifier for each user.
- location (str): A location, often of the form "city, state,
country".
- age (int): The age of the user in years; can be None.
"""
from copy import deepcopy
import json
import csv
# JSON rating object
RATINGS = {
"user_id": None,
"book_id": None,
"rating": None,
"implicit": None,
}
# JSON book object
BOOKS = {
"book_id": None,
"title": None,
"author": None,
"year": None,
"publisher": None,
}
# JSON user object
USERS = {
"user_id": None,
"location": None,
"age": None,
}
def convert_str(string):
"""Convert a string from 'iso-8859-1' to 'utf8'."""
return string.decode('iso-8859-1').encode('utf8')
def iter_lines(open_file):
"""Open the Book-Crossing CSVs and return an iterator over the lines.
Args:
open_file: A file handle object from open().
    Returns:
iterator: An iterator over each line in the file. Each line is a list,
with string elements for each column value.
"""
reader = csv.reader(
open_file,
delimiter=';',
doublequote=False,
escapechar="\\",
)
next(reader) # Skip the header
return reader
def parse_user_line(line):
"""Parse a line from the user CSV file.
A line is a list of strings as follows:
line = [
user_id,
location,
age,
]
Args:
        line (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "location", and
"age".
"""
(user, location, age) = line
current_user = deepcopy(USERS)
current_user["user_id"] = int(user)
current_user["location"] = convert_str(location)
# Sometimes the age is "NULL", which we handle by leaving the
# value as None
try:
current_user["age"] = int(age)
except ValueError:
pass
return current_user
def parse_rating_line(line):
"""Parse a line from the ratings CSV file.
A line is a list of strings as follows:
line = [
user_id,
book_id,
rating,
]
Args:
        line (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "book_id", "rating",
and "implicit".
"""
(user, book, rating) = line
current_rating = deepcopy(RATINGS)
current_rating["user_id"] = int(user)
current_rating["book_id"] = convert_str(book)
rating = int(rating)
if rating == 0:
current_rating["implicit"] = True
else:
current_rating["rating"] = rating
return current_rating
def parse_book_line(line):
"""Parse a line from the book CSV file.
A line is a list of strings as follows:
line = [
book,
title,
author,
year,
publisher,
small_cover_url,
medium_cover_url,
large_cover_url,
]
Args:
        line (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "book_id", "title", "author",
"year", and "publisher".
"""
# We throw out the three images from Amazon (hence the _,_,_)
(book, title, author, year, publisher, _, _, _) = line
current_book = deepcopy(BOOKS)
current_book["book_id"] = convert_str(book)
current_book["title"] = convert_str(title)
current_book["author"] = convert_str(author)
current_book["year"] = int(year)
current_book["publisher"] = convert_str(publisher)
return current_book
if __name__ == "__main__":
import argparse
# Set up command line flag handling
parser = argparse.ArgumentParser(
description="Transform the Book-Crossing datasets to JSON",
)
parser.add_argument(
'ratings',
type=str,
help="the file containing the ratings, normally 'BX-Book-Ratings.csv'",
)
parser.add_argument(
'users',
type=str,
help="the file containing the users, normally 'BX-Users.csv'",
)
parser.add_argument(
'books',
type=str,
help="the file containing the books, normally 'BX-Books.csv'",
)
parser.add_argument(
'-o',
'--output-directory',
type=str,
action="store",
help="the directory to save the output JSON files, by default the current directory",
default="./",
)
args = parser.parse_args()
# There are two cases of "bad" data that we want to remove:
#
# 1. Ratings that do not match to a valid user or book
# 2. Users who have no ratings after the above rule has been applied
# Find valid books
valid_books = []
book_data = []
with open(args.books, 'rb') as csvfile:
for line in iter_lines(csvfile):
ret = parse_book_line(line)
valid_books.append(ret["book_id"])
book_data.append(ret)
# Find valid users
valid_users = []
users_data = []
with open(args.users, 'rb') as csvfile:
for line in iter_lines(csvfile):
ret = parse_user_line(line)
valid_users.append(ret["user_id"])
users_data.append(ret)
# Save only ratings that have a valid book and a valid user. Additionally,
# save the users and books saved to filter the books and user files later.
valid_books = set(valid_books)
valid_users = set(valid_users)
rated_users = []
with\
open(args.ratings, 'rb') as csvfile,\
open("book-crossing_implicit_ratings.json", 'w') as imp,\
open("book-crossing_explicit_ratings.json", 'w') as exp:
for line in iter_lines(csvfile):
ret = parse_rating_line(line)
if ret["book_id"] in valid_books and ret["user_id"] in valid_users:
rated_users.append(ret["user_id"])
# Separate the two types of ratings; they can both be
# read in on Spark if the user wants both.
if ret["implicit"]:
imp.write(json.dumps(ret) + '\n')
else:
exp.write(json.dumps(ret) + '\n')
# Only save users that have at least one rating saved to the ratings
# outputs.
rated_and_valid_users = set(rated_users)
with open("book-crossing_books.json", 'w') as f:
for ret in book_data:
f.write(json.dumps(ret) + '\n')
with open("book-crossing_users.json", 'w') as f:
for ret in users_data:
if ret["user_id"] in rated_and_valid_users:
f.write(json.dumps(ret) + '\n')
|
package com.decathlon.ara.service.transformer;
import com.decathlon.ara.domain.RootCause;
import com.decathlon.ara.service.dto.rootcause.RootCauseDTO;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class RootCauseTransformerTest {
@InjectMocks
private RootCauseTransformer cut;
@Test
public void toDto_should_transform_object() {
// Given
RootCause value = new RootCause(1L, 23L, "name", null);
// When
RootCauseDTO rootCauseDTO = cut.toDto(value);
// Then
Assertions.assertThat(rootCauseDTO).isNotNull();
Assertions.assertThat(rootCauseDTO.getId()).isEqualTo(1L);
Assertions.assertThat(rootCauseDTO.getName()).isEqualTo("name");
}
@Test
public void toDto_should_return_empty_object_on_null_value() {
// When
RootCauseDTO rootCauseDTO = cut.toDto(null);
// Then
Assertions.assertThat(rootCauseDTO).isNotNull();
Assertions.assertThat(rootCauseDTO.getId()).isEqualTo(0L);
Assertions.assertThat(rootCauseDTO.getName()).isNull();
}
} |
def wordBreak(s, wordDict):
    # dp[i] is True when s[:i] can be segmented into words from wordDict
    dp = [False] * (len(s) + 1)
    dp[0] = True  # the empty prefix is trivially segmentable
    for i in range(1, len(s) + 1):
        for j in range(i):
            if dp[j] and s[j:i] in wordDict:
                dp[i] = True
                break
    return dp[len(s)] |
<reponame>fairix/kerkow2
/**
* digitvision
*
* @category digitvision
* @package Shopware\Plugins\DvsnProductOption
* @copyright (c) 2020 digitvision
*/
import './module/dvsn-product-option';
|
/*-----------------------------------------------------------------------------------
Template Name: Sell Bazar
Author: <NAME>
Author URI:
-----------------------------------------------------------------------------------
Javascript INDEX
===================
1. Side var collapse Javascript
2. Sparkline chart apply
3. User Management Javascript
4. Button delete value apply
5. Ctx category, sub category, item, unit chart
6. Number count animation
-----------------------------------------------------------------------------------*/
/*----------------------------------------*/
/* 1. Side var collapse Javascript
/*----------------------------------------*/
$(document).ready(function () {
$('#sidebarCollapse').on('click', function () {
$('#sidebar').toggleClass('toggled');
});
});
/*----------------------------------------*/
/* 2. Sparkline chart apply
/*----------------------------------------*/
$("#sparkline1").sparkline([34, 43, 43, 35, 44, 32, 44, 52, 25], {
type: 'line',
lineColor: '#17997f',
lineWidth: 1,
barSpacing: '100px',
fillColor: '#03a9f4',
});
$("#sparkline2").sparkline([-4, -2, 2, 0, 4, 5, 6, 7], {
type: 'bar',
barColor: '#03a9f4',
negBarColor: '#303030'
});
$("#sparkline6").sparkline([4, 6, 7, 7, 4, 3, 2, 1, 4, 4, 5, 6, 3, 4, 5, 8, 7, 6, 9, 3, 2, 4, 1, 5, 6, 4, 3, 7,], {
type: 'discrete',
lineColor: '#03a9f4'
});
/*----------------------------------------*/
/* 3. User Management Javascript
/*----------------------------------------*/
$(document).ready(function () {
$("#myInput").on("keyup", function () {
let value = $(this).val().toLowerCase();
$("#myList li").filter(function () {
$(this).toggle($(this).text().toLowerCase().indexOf(value) > -1)
});
});
});
function UserPermission(dataValue) {
$(".list-group-item").removeClass('ListStyle');
$("#userPermissionShow").css({'display': 'block'});
let userName = $(".user" + dataValue).text();
$(".addUserName").html(userName);
$("#addUserNameValue").val(dataValue);
$("#design" + dataValue).addClass('ListStyle');
let permissionName = ['Category', 'Costumer', 'Supplier', 'Unit', 'Product', 'Purchase Cash Return', 'Repair', 'Sales Cash Return', 'Sales Repair', 'Damage', 'User Management Report', 'Sales Management Report', 'Purchase Management Report', 'Inventory Management Report', 'Purchase', 'Sales', 'Purchase Damage'];
let permissionValue = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
let result = "";
let CSRF_TOKEN = $('meta[name="csrf-token"]').attr('content');
$.ajax({
url: 'getRequest',
type: 'POST',
data: {_token: CSRF_TOKEN, data: dataValue},
dataType: 'JSON',
success: function (data) {
for (let i = 0; i < permissionName.length; i++) {
let a = data.indexOf(permissionValue[i].toString());
if (a >= 0) {
result += ' <tr>\n' +
' <td class="w-75">' + permissionName[i] + '</td>\n' +
' <td>\n' +
' <label class="switch">\n' +
' <input type="checkbox" name="checkbox[]" value="' + permissionValue[i] + '" onclick="checkboxCall(' + permissionValue[i] + ')" class="LogInputRem' + permissionValue[i] + '" checked>\n' +
' <span class="slider round"><i>✔</i></span>\n' +
' </label>\n' +
' </td>\n' +
' </tr>';
} else {
result += ' <tr>\n' +
' <td class="w-75">' + permissionName[i] + '</td>\n' +
' <td>\n' +
' <label class="switch">\n' +
' <input type="checkbox" name="checkbox[]" value="' + permissionValue[i] + '" onclick="checkboxCall(' + permissionValue[i] + ')" class="LogInputRem' + permissionValue[i] + '">\n' +
' <span class="slider round"><i>✔</i></span>\n' +
' </label>\n' +
' </td>\n' +
' </tr>';
}
}
$(".addPermissionTable").html(result);
}
});
}
function checkboxCall(value) {
let CSRF_TOKEN = $('meta[name="csrf-token"]').attr('content');
let userId = $("#addUserNameValue").val();
if ($('.LogInputRem' + value).prop('checked')) {
$.ajax({
url: 'permissionStore',
type: 'POST',
data: {_token: CSRF_TOKEN, userid: userId, permissionid: value},
dataType: 'JSON',
success: function (data) {
alert(data);
}
});
}
else {
$.ajax({
url: 'permissionDelete',
type: 'POST',
data: {_token: CSRF_TOKEN, userid: userId, permissionid: value},
dataType: 'JSON',
success: function (data) {
alert(data);
}
});
}
}
/*----------------------------------------*/
/* 4. Button delete value apply
/*----------------------------------------*/
$('#deleteModal').on('show.bs.modal', function (e) {
$(this).find('.btn-ok').attr('href', $(e.relatedTarget).data('href'));
});
/*----------------------------------------*/
/* 5. Ctx category, sub category, item, unit chart
/*----------------------------------------*/
let ctx = document.getElementById("widgetChart1");
ctx.height = 150;
let myChart = new Chart(ctx, {
type: 'line',
data: {
labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July'],
type: 'line',
datasets: [{
data: [65, 59, 84, 84, 51, 55, 40],
label: 'Dataset',
backgroundColor: 'transparent',
borderColor: 'rgba(255,255,255,.55)',
},]
},
options: {
maintainAspectRatio: false,
legend: {
display: false
},
responsive: true,
tooltips: {
mode: 'index',
titleFontSize: 12,
titleFontColor: '#000',
bodyFontColor: '#000',
backgroundColor: '#fff',
titleFontFamily: 'Montserrat',
bodyFontFamily: 'Montserrat',
cornerRadius: 3,
intersect: false,
},
scales: {
xAxes: [{
gridLines: {
color: 'transparent',
zeroLineColor: 'transparent'
},
ticks: {
fontSize: 2,
fontColor: 'transparent'
}
}],
yAxes: [{
display: false,
ticks: {
display: false,
}
}]
},
title: {
display: false,
},
elements: {
line: {
borderWidth: 1
},
point: {
radius: 4,
hitRadius: 10,
hoverRadius: 4
}
}
}
});
//WidgetChart 2
ctx = document.getElementById("widgetChart2");
ctx.height = 150;
myChart = new Chart(ctx, {
type: 'line',
data: {
labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July'],
type: 'line',
datasets: [{
data: [1, 18, 9, 17, 34, 22, 11],
label: 'Dataset',
backgroundColor: '#63c2de',
borderColor: 'rgba(255,255,255,.55)',
},]
},
options: {
maintainAspectRatio: false,
legend: {
display: false
},
responsive: true,
tooltips: {
mode: 'index',
titleFontSize: 12,
titleFontColor: '#000',
bodyFontColor: '#000',
backgroundColor: '#fff',
titleFontFamily: 'Montserrat',
bodyFontFamily: 'Montserrat',
cornerRadius: 3,
intersect: false,
},
scales: {
xAxes: [{
gridLines: {
color: 'transparent',
zeroLineColor: 'transparent'
},
ticks: {
fontSize: 2,
fontColor: 'transparent'
}
}],
yAxes: [{
display: false,
ticks: {
display: false,
}
}]
},
title: {
display: false,
},
elements: {
line: {
tension: 0.00001,
borderWidth: 1
},
point: {
radius: 4,
hitRadius: 10,
hoverRadius: 4
}
}
}
});
//WidgetChart 3
ctx = document.getElementById("widgetChart3");
ctx.height = 70;
myChart = new Chart(ctx, {
type: 'line',
data: {
labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July'],
type: 'line',
datasets: [{
data: [78, 81, 80, 45, 34, 12, 40],
label: 'Dataset',
backgroundColor: 'rgba(255,255,255,.2)',
borderColor: 'rgba(255,255,255,.55)',
},]
},
options: {
maintainAspectRatio: true,
legend: {
display: false
},
responsive: true,
// tooltips: {
// mode: 'index',
// titleFontSize: 12,
// titleFontColor: '#000',
// bodyFontColor: '#000',
// backgroundColor: '#fff',
// titleFontFamily: 'Montserrat',
// bodyFontFamily: 'Montserrat',
// cornerRadius: 3,
// intersect: false,
// },
scales: {
xAxes: [{
gridLines: {
color: 'transparent',
zeroLineColor: 'transparent'
},
ticks: {
fontSize: 2,
fontColor: 'transparent'
}
}],
yAxes: [{
display: false,
ticks: {
display: false,
}
}]
},
title: {
display: false,
},
elements: {
line: {
borderWidth: 2
},
point: {
radius: 0,
hitRadius: 10,
hoverRadius: 4
}
}
}
});
//WidgetChart 4
ctx = document.getElementById("widgetChart4");
ctx.height = 70;
myChart = new Chart(ctx, {
type: 'bar',
data: {
labels: ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],
datasets: [
{
label: "My First dataset",
data: [78, 81, 80, 45, 34, 12, 40, 75, 34, 89, 32, 68, 54, 72, 18, 98],
borderColor: "rgba(0, 123, 255, 0.9)",
//borderWidth: "0",
backgroundColor: "rgba(255,255,255,.3)"
}
]
},
options: {
maintainAspectRatio: true,
legend: {
display: false
},
scales: {
xAxes: [{
display: false,
categoryPercentage: 1,
barPercentage: 0.5
}],
yAxes: [{
display: false
}]
}
}
});
ctx = document.getElementById("barchart1");
let barchart1 = new Chart(ctx, {
type: 'bar',
data: {
labels: ["Purchase", "Sales", "Purchase", "Sales", "Purchase", "Sales"],
datasets: [{
label: 'Bar Chart',
data: [17000, 19000, 15000, 14000, 11000, 12000],
backgroundColor: [
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)',
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)',
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)'
],
borderColor: [
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)',
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)',
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)'
],
borderWidth: 1
}]
},
options: {
scales: {
            xAxes: [{
                ticks: {
                    autoSkip: false,
                    maxRotation: 0,
                    fontColor: "#fff"
                }
            }],
            yAxes: [{
                ticks: {
                    autoSkip: false,
                    maxRotation: 0,
                    fontColor: "#fff"
                }
            }]
}
}
});
/*----------------------------------------*/
/* 6. Number count animation
/*----------------------------------------*/
$('.count').each(function () {
$(this).prop('Counter', 0).animate({
Counter: $(this).text()
}, {
duration: 2000,
easing: 'swing',
step: function (now) {
$(this).text(Math.ceil(now));
}
});
});
|
package nomad
import (
"fmt"
"testing"
"time"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
)
func TestCoreScheduler_EvalGC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" eval
state := s1.fsm.State()
eval := mock.Eval()
eval.Status = structs.EvalStatusFailed
state.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
err := state.UpsertEvals(1000, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "dead" alloc
alloc := mock.Alloc()
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
alloc.JobID = eval.JobID
// Insert "lost" alloc
alloc2 := mock.Alloc()
alloc2.EvalID = eval.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
alloc2.ClientStatus = structs.AllocClientStatusLost
alloc2.JobID = eval.JobID
err = state.UpsertAllocs(1001, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA != nil {
t.Fatalf("bad: %v", outA)
}
outA2, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 != nil {
t.Fatalf("bad: %v", outA2)
}
}
// An EvalGC should never reap a batch job that has not been stopped
func TestCoreScheduler_EvalGC_Batch(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert a "dead" job
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusDead
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "complete" eval
eval := mock.Eval()
eval.Status = structs.EvalStatusComplete
eval.Type = structs.JobTypeBatch
eval.JobID = job.ID
err = state.UpsertEvals(1001, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "failed" alloc
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
// Insert "lost" alloc
alloc2 := mock.Alloc()
alloc2.JobID = job.ID
alloc2.EvalID = eval.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
alloc2.ClientStatus = structs.AllocClientStatusLost
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Nothing should be gone
ws := memdb.NewWatchSet()
out, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA == nil {
t.Fatalf("bad: %v", outA)
}
outA2, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 == nil {
t.Fatalf("bad: %v", outA2)
}
outB, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outB == nil {
t.Fatalf("bad: %v", outB)
}
}
// An EvalGC should reap a batch job that has been stopped
func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Create a "dead" job
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusDead
// Insert "complete" eval
eval := mock.Eval()
eval.Status = structs.EvalStatusComplete
eval.Type = structs.JobTypeBatch
eval.JobID = job.ID
err := state.UpsertEvals(1001, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "failed" alloc
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
// Insert "lost" alloc
alloc2 := mock.Alloc()
alloc2.JobID = job.ID
alloc2.EvalID = eval.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
alloc2.ClientStatus = structs.AllocClientStatusLost
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Everything should be gone
ws := memdb.NewWatchSet()
out, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA != nil {
t.Fatalf("bad: %v", outA)
}
outA2, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 != nil {
t.Fatalf("bad: %v", outA2)
}
}
func TestCoreScheduler_EvalGC_Partial(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" eval
state := s1.fsm.State()
eval := mock.Eval()
eval.Status = structs.EvalStatusComplete
state.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
err := state.UpsertEvals(1000, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "dead" alloc
alloc := mock.Alloc()
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
// Insert "lost" alloc
alloc2 := mock.Alloc()
alloc2.JobID = alloc.JobID
alloc2.EvalID = eval.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
alloc2.ClientStatus = structs.AllocClientStatusLost
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "running" alloc
alloc3 := mock.Alloc()
alloc3.EvalID = eval.ID
state.UpsertJobSummary(1003, mock.JobSummary(alloc3.JobID))
err = state.UpsertAllocs(1004, []*structs.Allocation{alloc3})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should not be gone
ws := memdb.NewWatchSet()
out, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc3.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA == nil {
t.Fatalf("bad: %v", outA)
}
// Should be gone
outB, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outB != nil {
t.Fatalf("bad: %v", outB)
}
outC, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outC != nil {
t.Fatalf("bad: %v", outC)
}
}
func TestCoreScheduler_EvalGC_Force(t *testing.T) {
t.Parallel()
for _, withAcl := range []bool{false, true} {
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
var server *Server
if withAcl {
server, _ = testACLServer(t, nil)
} else {
server = testServer(t, nil)
}
defer server.Shutdown()
testutil.WaitForLeader(t, server.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" eval
state := server.fsm.State()
eval := mock.Eval()
eval.Status = structs.EvalStatusFailed
state.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
err := state.UpsertEvals(1000, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "dead" alloc
alloc := mock.Alloc()
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(server, snap)
// Attempt the GC
gc := server.coreJobEval(structs.CoreJobForceGC, 1002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA != nil {
t.Fatalf("bad: %v", outA)
}
})
}
}
func TestCoreScheduler_NodeGC(t *testing.T) {
t.Parallel()
for _, withAcl := range []bool{false, true} {
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
var server *Server
if withAcl {
server, _ = testACLServer(t, nil)
} else {
server = testServer(t, nil)
}
defer server.Shutdown()
testutil.WaitForLeader(t, server.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" node
state := server.fsm.State()
node := mock.Node()
node.Status = structs.NodeStatusDown
err := state.UpsertNode(1000, node)
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := server.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*server.config.NodeGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(server, snap)
// Attempt the GC
gc := server.coreJobEval(structs.CoreJobNodeGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
})
}
}
func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" node
state := s1.fsm.State()
node := mock.Node()
node.Status = structs.NodeStatusDown
err := state.UpsertNode(1000, node)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert a terminal alloc on that node
alloc := mock.Alloc()
alloc.DesiredStatus = structs.AllocDesiredStatusStop
state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
}
func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" node
state := s1.fsm.State()
node := mock.Node()
node.Status = structs.NodeStatusDown
err := state.UpsertNode(1000, node)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert a running alloc on that node
alloc := mock.Alloc()
alloc.NodeID = node.ID
alloc.DesiredStatus = structs.AllocDesiredStatusRun
alloc.ClientStatus = structs.AllocClientStatusRunning
state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still be here
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
}
func TestCoreScheduler_NodeGC_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert "dead" node
state := s1.fsm.State()
node := mock.Node()
node.Status = structs.NodeStatusDown
err := state.UpsertNode(1000, node)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobForceGC, 1000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
}
func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusDead
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert two evals, one terminal and one not
eval := mock.Eval()
eval.JobID = job.ID
eval.Status = structs.EvalStatusComplete
eval2 := mock.Eval()
eval2.JobID = job.ID
eval2.Status = structs.EvalStatusPending
err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outE, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE == nil {
t.Fatalf("bad: %v", outE)
}
outE2, err := state.EvalByID(ws, eval2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE2 == nil {
t.Fatalf("bad: %v", outE2)
}
// Update the second eval to be terminal
eval2.Status = structs.EvalStatusComplete
err = state.UpsertEvals(1003, []*structs.Evaluation{eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err = state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core = NewCoreScheduler(s1, snap)
// Attempt the GC
gc = s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should not still exist
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outE, err = state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE != nil {
t.Fatalf("bad: %v", outE)
}
outE2, err = state.EvalByID(ws, eval2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE2 != nil {
t.Fatalf("bad: %v", outE2)
}
}
func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusDead
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert an eval
eval := mock.Eval()
eval.JobID = job.ID
eval.Status = structs.EvalStatusComplete
err = state.UpsertEvals(1001, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert two allocs, one terminal and one not
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusRun
alloc.ClientStatus = structs.AllocClientStatusComplete
alloc2 := mock.Alloc()
alloc2.JobID = job.ID
alloc2.EvalID = eval.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
alloc2.ClientStatus = structs.AllocClientStatusRunning
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA == nil {
t.Fatalf("bad: %v", outA)
}
outA2, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 == nil {
t.Fatalf("bad: %v", outA2)
}
// Update the second alloc to be terminal
alloc2.ClientStatus = structs.AllocClientStatusComplete
err = state.UpsertAllocs(1003, []*structs.Allocation{alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err = state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core = NewCoreScheduler(s1, snap)
// Attempt the GC
gc = s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should not still exist
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outA, err = state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA != nil {
t.Fatalf("bad: %v", outA)
}
outA2, err = state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 != nil {
t.Fatalf("bad: %v", outA2)
}
}
// This test ensures that batch jobs are GC'd in one shot, meaning that all
// of the allocs/evals and the job itself are reaped together, or nothing is
func TestCoreScheduler_JobGC_OneShot(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert two complete evals
eval := mock.Eval()
eval.JobID = job.ID
eval.Status = structs.EvalStatusComplete
eval2 := mock.Eval()
eval2.JobID = job.ID
eval2.Status = structs.EvalStatusComplete
err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert one complete alloc and one running on distinct evals
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
alloc2 := mock.Alloc()
alloc2.JobID = job.ID
alloc2.EvalID = eval2.ID
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
	// Force the job's state to dead
job.Status = structs.JobStatusDead
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outE, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE == nil {
t.Fatalf("bad: %v", outE)
}
outE2, err := state.EvalByID(ws, eval2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE2 == nil {
t.Fatalf("bad: %v", outE2)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA == nil {
t.Fatalf("bad: %v", outA)
}
outA2, err := state.AllocByID(ws, alloc2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA2 == nil {
t.Fatalf("bad: %v", outA2)
}
}
// This test ensures that stopped jobs are GC'd
func TestCoreScheduler_JobGC_Stopped(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert job.
state := s1.fsm.State()
job := mock.Job()
//job.Status = structs.JobStatusDead
job.Stop = true
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert two complete evals
eval := mock.Eval()
eval.JobID = job.ID
eval.Status = structs.EvalStatusComplete
eval2 := mock.Eval()
eval2.JobID = job.ID
eval2.Status = structs.EvalStatusComplete
err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert one complete alloc
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.EvalID = eval.ID
alloc.DesiredStatus = structs.AllocDesiredStatusStop
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Shouldn't still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outE, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE != nil {
t.Fatalf("bad: %v", outE)
}
outE2, err := state.EvalByID(ws, eval2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE2 != nil {
t.Fatalf("bad: %v", outE2)
}
outA, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA != nil {
t.Fatalf("bad: %v", outA)
}
}
func TestCoreScheduler_JobGC_Force(t *testing.T) {
t.Parallel()
for _, withAcl := range []bool{false, true} {
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
var server *Server
if withAcl {
server, _ = testACLServer(t, nil)
} else {
server = testServer(t, nil)
}
defer server.Shutdown()
testutil.WaitForLeader(t, server.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert job.
state := server.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusDead
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert a terminal eval
eval := mock.Eval()
eval.JobID = job.ID
eval.Status = structs.EvalStatusComplete
err = state.UpsertEvals(1001, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(server, snap)
// Attempt the GC
gc := server.coreJobEval(structs.CoreJobForceGC, 1002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Shouldn't still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
outE, err := state.EvalByID(ws, eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE != nil {
t.Fatalf("bad: %v", outE)
}
})
}
}
// This test ensures parameterized jobs only get gc'd when stopped
func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert a parameterized job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusRunning
job.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadRequired,
}
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
// Mark the job as stopped and try again
job2 := job.Copy()
job2.Stop = true
err = state.UpsertJob(2000, job2)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err = state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core = NewCoreScheduler(s1, snap)
// Attempt the GC
gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should not exist
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %+v", out)
}
}
// This test ensures periodic jobs don't get GC'd until they are stopped
func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
	// Insert a periodic job.
state := s1.fsm.State()
job := mock.PeriodicJob()
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
// Mark the job as stopped and try again
job2 := job.Copy()
job2.Stop = true
err = state.UpsertJob(2000, job2)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err = state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core = NewCoreScheduler(s1, snap)
// Attempt the GC
gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should not exist
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %+v", out)
}
}
func TestCoreScheduler_DeploymentGC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
	// Insert an active, a terminal, and a terminal-with-allocations deployment
state := s1.fsm.State()
d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment()
d1.Status = structs.DeploymentStatusFailed
d3.Status = structs.DeploymentStatusSuccessful
assert.Nil(state.UpsertDeployment(1000, d1), "UpsertDeployment")
assert.Nil(state.UpsertDeployment(1001, d2), "UpsertDeployment")
assert.Nil(state.UpsertDeployment(1002, d3), "UpsertDeployment")
a := mock.Alloc()
a.JobID = d3.JobID
a.DeploymentID = d3.ID
assert.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}), "UpsertAllocs")
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.DeploymentGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
assert.Nil(err, "Snapshot")
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobDeploymentGC, 2000)
assert.Nil(core.Process(gc), "Process GC")
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.DeploymentByID(ws, d1.ID)
assert.Nil(err, "DeploymentByID")
assert.Nil(out, "Terminal Deployment")
out2, err := state.DeploymentByID(ws, d2.ID)
assert.Nil(err, "DeploymentByID")
assert.NotNil(out2, "Active Deployment")
out3, err := state.DeploymentByID(ws, d3.ID)
assert.Nil(err, "DeploymentByID")
assert.NotNil(out3, "Terminal Deployment With Allocs")
}
func TestCoreScheduler_DeploymentGC_Force(t *testing.T) {
t.Parallel()
for _, withAcl := range []bool{false, true} {
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
var server *Server
if withAcl {
server, _ = testACLServer(t, nil)
} else {
server = testServer(t, nil)
}
defer server.Shutdown()
testutil.WaitForLeader(t, server.RPC)
assert := assert.New(t)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert terminal and active deployment
state := server.fsm.State()
d1, d2 := mock.Deployment(), mock.Deployment()
d1.Status = structs.DeploymentStatusFailed
assert.Nil(state.UpsertDeployment(1000, d1), "UpsertDeployment")
assert.Nil(state.UpsertDeployment(1001, d2), "UpsertDeployment")
// Create a core scheduler
snap, err := state.Snapshot()
assert.Nil(err, "Snapshot")
core := NewCoreScheduler(server, snap)
// Attempt the GC
gc := server.coreJobEval(structs.CoreJobForceGC, 1000)
assert.Nil(core.Process(gc), "Process Force GC")
// Should be gone
ws := memdb.NewWatchSet()
out, err := state.DeploymentByID(ws, d1.ID)
assert.Nil(err, "DeploymentByID")
assert.Nil(out, "Terminal Deployment")
out2, err := state.DeploymentByID(ws, d2.ID)
assert.Nil(err, "DeploymentByID")
assert.NotNil(out2, "Active Deployment")
})
}
}
func TestCoreScheduler_PartitionEvalReap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Create a core scheduler
snap, err := s1.fsm.State().Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Set the max ids per reap to something lower.
maxIdsPerReap = 2
evals := []string{"a", "b", "c"}
allocs := []string{"1", "2", "3"}
requests := core.(*CoreScheduler).partitionEvalReap(evals, allocs)
if len(requests) != 3 {
t.Fatalf("Expected 3 requests got: %v", requests)
}
first := requests[0]
	if len(first.Allocs) != 2 || len(first.Evals) != 0 {
t.Fatalf("Unexpected first request: %v", first)
}
second := requests[1]
	if len(second.Allocs) != 1 || len(second.Evals) != 1 {
t.Fatalf("Unexpected second request: %v", second)
}
third := requests[2]
	if len(third.Allocs) != 0 || len(third.Evals) != 2 {
t.Fatalf("Unexpected third request: %v", third)
}
}
func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Create a core scheduler
snap, err := s1.fsm.State().Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Set the max ids per reap to something lower.
maxIdsPerReap = 2
deployments := []string{"a", "b", "c"}
requests := core.(*CoreScheduler).partitionDeploymentReap(deployments)
if len(requests) != 2 {
t.Fatalf("Expected 2 requests got: %v", requests)
}
first := requests[0]
if len(first.Deployments) != 2 {
t.Fatalf("Unexpected first request: %v", first)
}
second := requests[1]
if len(second.Deployments) != 1 {
t.Fatalf("Unexpected second request: %v", second)
}
}
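// The two partition tests above pivot on maxIdsPerReap. As a rough
// illustration of the batching idea they exercise (a simplified sketch,
// not Nomad's actual partitionEvalReap/partitionDeploymentReap logic),
// splitting a list of IDs into reap-sized chunks can be written as:
func chunkIDs(ids []string, size int) [][]string {
	var chunks [][]string
	for len(ids) > size {
		chunks = append(chunks, ids[:size])
		ids = ids[size:]
	}
	if len(ids) > 0 {
		chunks = append(chunks, ids)
	}
	return chunks
}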
|
//import document from "document"
import { Alarm } from "../common/alarm"
import * as util from "../common/utils"
import { today as todayActivity, goals } from "user-activity"
import { $at } from '../../common/view'
import { UserInterface } from '../ui'
let document = require("document");
const $ = $at('#activeminutes');
export class ActiveMinutesUI extends UserInterface {
name = 'activeminutes'
constructor() {
super()
this.$ = $
this.el = this.$()
}
onRender() {
super.onRender()
this.$("#time").text = `${todayActivity.local.activeMinutes || 0} actv mins`
}
}
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# replace these env var values accordingly
SPARK_HOME=/Users/example/repository/spark
K8S_ENDPOINT=http://localhost:8001
SPARK_EXECUTOR_NUM=1
SPARK_DOCKER_IMAGE=yunikorn/spark:latest
SPARK_EXAMPLE_JAR=local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0-SNAPSHOT.jar
# spark submit command
${SPARK_HOME}/bin/spark-submit \
--master k8s://${K8S_ENDPOINT} --deploy-mode cluster --name spark-pi \
--class org.apache.spark.examples.SparkPi \
--conf spark.executor.instances=${SPARK_EXECUTOR_NUM} \
--conf spark.kubernetes.container.image=${SPARK_DOCKER_IMAGE} \
--conf spark.kubernetes.driver.podTemplateFile=../driver.yaml \
--conf spark.kubernetes.executor.podTemplateFile=../executor.yaml \
${SPARK_EXAMPLE_JAR}
|
<reponame>enriquemartinez-emc/music-store-nestjs
import { Artist as ArtistModel } from '.prisma/client';
import { IQueryHandler, QueryHandler } from '@nestjs/cqrs';
import { PrismaService } from '../prisma/prisma.service';
export class GetArtistsQuery {}
@QueryHandler(GetArtistsQuery)
export class GetArtistsHandler implements IQueryHandler<GetArtistsQuery> {
constructor(private readonly prismaService: PrismaService) {}
async execute(): Promise<ArtistModel[]> {
return this.prismaService.artist.findMany();
}
}
|
#!/usr/bin/env bash
# Copyright 2018 The Go Cloud Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs only tests relevant to the current pull request.
# At the moment, this only gates running the Wire test suite.
# See https://github.com/google/go-cloud/issues/28 for solving the
# general case.
# https://coderwall.com/p/fkfaqq/safer-bash-scripts-with-set-euxo-pipefail
set -euxo pipefail
if [[ $# -gt 0 ]]; then
echo "usage: runchecks.sh" 1>&2
exit 64
fi
module="github.com/google/go-cloud"
# Run Go tests for each module.
for path in "." "./internal/contributebot" "./samples/appengine"; do
pushd ${path}
go test -race ./...
wire check ./...
popd
done
|
<reponame>aryan9234/AndroidUtils<gh_stars>0
package com.nayragames.canvasutils;
/**
* (c) 2016 <NAME>
*
* @author <NAME>
* @since 7/20/2016.
*/
public class Pref {
}
|
<filename>moves.go
package pokeapigo
type Move struct {
Id int
Name string
Accuracy int
Effect_Chance int
Pp int
Priority int
Power int
Contest_Combos ContestComboSets
Contest_Type NamedAPIResource
Contest_Effect APIResource
Damage_Class NamedAPIResource
Effect_Entries []VerboseEffect
Effect_Changes []AbilityEffectChange
Flavor_Text_Entries []MoveFlavorText
Generation NamedAPIResource
Machines []MachineVersionDetail
Meta MoveMetaData
Names []Name
Past_Values []PastMoveStatValues
Stat_Changes []MoveStatChange
Super_Contest_Effect APIResource
Target NamedAPIResource
Type NamedAPIResource
}
type ContestComboSets struct {
Normal ContestComboDetail
Super ContestComboDetail
}
type ContestComboDetail struct {
Use_Before []NamedAPIResource
Use_After []NamedAPIResource
}
type MoveFlavorText struct {
Flavor_Text string
Language NamedAPIResource
Version_Group NamedAPIResource
}
type MoveMetaData struct {
Ailment NamedAPIResource
Category NamedAPIResource
Min_Hits int
Max_Hits int
Min_Turns int
Max_Turns int
Drain int
Healing int
Crit_Rate int
Ailment_Chance int
Flinch_Chance int
Stat_Chance int
}
type MoveStatChange struct {
Change int
Stat NamedAPIResource
}
type PastMoveStatValues struct {
Accuracy int
Effect_Chance int
Power int
Pp int
Effect_Entries []VerboseEffect
Type NamedAPIResource
Version_Group NamedAPIResource
}
type MoveAilment struct {
Id int
Name string
Moves []NamedAPIResource
Names []Name
}
type MoveBattleStyle struct {
Id int
Name string
Names []Name
}
type MoveCategory struct {
Id int
Name string
Moves []NamedAPIResource
Descriptions []Description
}
type MoveDamageClass struct {
Id int
Name string
Descriptions []Description
Moves []NamedAPIResource
Names []Name
}
type MoveLearnMethod struct {
Id int
Name string
Descriptions []Description
Names []Name
Version_Groups []NamedAPIResource
}
type MoveTarget struct {
Id int
Name string
Descriptions []Description
Moves []NamedAPIResource
Names []Name
}
|
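// Illustrative sketch, not part of the pokeapigo package above: the Move
// struct mirrors the JSON returned by the public PokeAPI move endpoint
// (https://pokeapi.co/api/v2/move/{id or name}/). This standalone program
// decodes a small subset of those fields into a local struct defined here for
// demonstration; it is an assumption, not the package's own type or API.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// move mirrors a few fields of the Move struct above. Go's encoding/json
// matches keys such as "accuracy" and "pp" to these exported fields
// case-insensitively, so no struct tags are strictly required for this subset.
type move struct {
	Id       int
	Name     string
	Accuracy int
	Pp       int
	Power    int
	Priority int
}

func main() {
	resp, err := http.Get("https://pokeapi.co/api/v2/move/tackle/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var m move
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%s: power=%d accuracy=%d pp=%d\n", m.Name, m.Power, m.Accuracy, m.Pp)
}
|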
package moe.mkx.uimf.groupbuilder.dao;
import moe.mkx.uimf.groupbuilder.model.LoginUser;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
public interface LoginUserDao {
int insertPerson(UUID id, LoginUser loginUser);
default int addLoginUser(LoginUser loginUser){
UUID id = UUID.randomUUID();
return insertPerson(id, loginUser);
}
List<LoginUser> selectAllUser();
Optional<LoginUser> selectUserByID(UUID userID);
int deleteUserByID(UUID userID);
int updateUserByID(UUID userID, LoginUser loginUser);
}
|