text
stringlengths 1
1.05M
|
|---|
/*
The MIT License (MIT)
Copyright (c) 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Lower bound of the lag search. (The old comment claimed this was initialized
// when the AudioContext was created; it is a fixed constant here.)
const MIN_SAMPLES = 0;
// Minimum normalized correlation for a lag to count as a pitch candidate.
const GOOD_ENOUGH_CORRELATION = 0.9;

/**
 * Estimate the fundamental frequency (Hz) of `buf` using time-domain
 * autocorrelation. Returns 0 when the signal is near-silent or no
 * sufficiently confident correlation peak is found.
 *
 * @param buf        time-domain samples
 * @param sampleRate sample rate of `buf` in Hz
 */
function autoCorrelate(buf: Float32Array, sampleRate: number) {
  const SIZE = buf.length;
  const MAX_SAMPLES = Math.floor(SIZE / 2);
  let best_offset = -1;
  let best_correlation = 0;
  let rms = 0;
  let foundGoodCorrelation = false;
  // Bug fix: was `[MAX_SAMPLES]` — a one-element array containing the number
  // MAX_SAMPLES — instead of an array with MAX_SAMPLES slots.
  const correlations = new Array<number>(MAX_SAMPLES);

  // Root-mean-square amplitude; bail out early on near-silence.
  buf.forEach((val) => (rms += val ** 2));
  rms = Math.sqrt(rms / SIZE);
  if (rms < 0.0008) return 0;

  let lastCorrelation = 1;
  for (let offset = MIN_SAMPLES; offset < MAX_SAMPLES; offset++) {
    let correlation = 0;

    // Normalized similarity between the signal and itself shifted by
    // `offset`: 1 = identical, lower = more different.
    for (let i = 0; i < MAX_SAMPLES; i++) {
      correlation += Math.abs(buf[i] - buf[i + offset]);
    }
    correlation = 1 - correlation / MAX_SAMPLES;
    correlations[offset] = correlation; // store it, for the tweaking we need to do below.

    if (
      correlation > GOOD_ENOUGH_CORRELATION &&
      correlation > lastCorrelation
    ) {
      foundGoodCorrelation = true;
      if (correlation > best_correlation) {
        best_correlation = correlation;
        best_offset = offset;
      }
    } else if (foundGoodCorrelation) {
      // short-circuit - we found a good correlation, then a bad one, so we'd just be seeing copies from here.
      // Now we need to tweak the offset - by interpolating between the values to the left and right of the
      // best offset, and shifting it a bit. This is complex, and HACKY in this code (happy to take PRs!) -
      // we need to do a curve fit on correlations[] around best_offset in order to better determine precise
      // (anti-aliased) offset.
      // we know best_offset >=1,
      // since foundGoodCorrelation cannot go to true until the second pass (offset=1), and
      // we can't drop into this clause until the following pass (else if).
      const shift =
        (correlations[best_offset + 1] -
          correlations[best_offset - 1]) /
        correlations[best_offset];
      return sampleRate / (best_offset + 8 * shift);
    }
    lastCorrelation = correlation;
  }

  // No peak-then-drop pattern was seen; fall back to the best raw peak.
  if (best_correlation > 0.01) {
    return sampleRate / best_offset;
  }
  return 0;
}
/**
 * Sample the analyser's current time-domain signal and estimate its pitch
 * in Hz. Returns 0 when the signal is too quiet or no stable pitch exists.
 */
function get_pitch(audioContext: AudioContext, analyser: AnalyserNode): number {
  const samples = new Float32Array(analyser.fftSize);
  analyser.getFloatTimeDomainData(samples);
  return autoCorrelate(samples, audioContext.sampleRate);
}

export { get_pitch };
|
<reponame>buidler-labs/hedera-mirror-node
package com.hedera.mirror.monitor.publish.transaction.account;
/*-
*
* Hedera Mirror Node
*
* Copyright (C) 2019 - 2022 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.InstanceOfAssertFactories.STRING;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import org.junit.jupiter.api.Test;
import com.hedera.hashgraph.sdk.AccountUpdateTransaction;
import com.hedera.hashgraph.sdk.PrivateKey;
import com.hedera.hashgraph.sdk.PublicKey;
import com.hedera.mirror.monitor.publish.transaction.AbstractTransactionSupplierTest;
class AccountUpdateTransactionSupplierTest extends AbstractTransactionSupplierTest {

    @Test
    void createWithMinimumData() {
        // Only the mandatory account id is set; everything else should come
        // from the supplier defaults.
        AccountUpdateTransactionSupplier supplier = new AccountUpdateTransactionSupplier();
        supplier.setAccountId(ACCOUNT_ID.toString());
        AccountUpdateTransaction transaction = supplier.get();

        assertThat(transaction)
                .returns(ACCOUNT_ID, AccountUpdateTransaction::getAccountId)
                .returns(null, AccountUpdateTransaction::getKey)
                .returns(MAX_TRANSACTION_FEE_HBAR, AccountUpdateTransaction::getMaxTransactionFee)
                .returns(null, AccountUpdateTransaction::getProxyAccountId)
                .returns(false, AccountUpdateTransaction::getReceiverSignatureRequired)
                .satisfies(t -> assertThat(t.getExpirationTime()).isNotNull())
                .extracting(AccountUpdateTransaction::getAccountMemo, STRING)
                .contains("Mirror node updated test account");
    }

    @Test
    void createWithCustomData() {
        // Every configurable field is overridden and must be reflected in
        // the generated transaction.
        Instant expirationTime = Instant.now().plus(1, ChronoUnit.DAYS);
        PublicKey key = PrivateKey.generate().getPublicKey();

        AccountUpdateTransactionSupplier supplier = new AccountUpdateTransactionSupplier();
        supplier.setAccountId(ACCOUNT_ID.toString());
        supplier.setExpirationTime(expirationTime);
        supplier.setMaxTransactionFee(1);
        supplier.setProxyAccountId(ACCOUNT_ID_2.toString());
        supplier.setPublicKey(key.toString());
        supplier.setReceiverSignatureRequired(true);
        AccountUpdateTransaction transaction = supplier.get();

        assertThat(transaction)
                .returns(ACCOUNT_ID, AccountUpdateTransaction::getAccountId)
                .returns(expirationTime, AccountUpdateTransaction::getExpirationTime)
                .returns(key, AccountUpdateTransaction::getKey)
                .returns(ONE_TINYBAR, AccountUpdateTransaction::getMaxTransactionFee)
                .returns(ACCOUNT_ID_2, AccountUpdateTransaction::getProxyAccountId)
                .returns(true, AccountUpdateTransaction::getReceiverSignatureRequired)
                .extracting(AccountUpdateTransaction::getAccountMemo, STRING)
                .contains("Mirror node updated test account");
    }

    @Override
    protected Class getSupplierClass() {
        return AccountUpdateTransactionSupplier.class;
    }
}
|
#!/bin/bash
# Launcher: run viva.jar from the directory containing this script,
# forwarding all command-line arguments.
dir=$(dirname "$0")
# "$@" preserves argument word boundaries; the original used $* which
# re-splits arguments containing spaces. Paths are quoted for the same reason.
exec java -jar "$dir/viva.jar" "$@"
<gh_stars>0
// Copyright 2013 The StudyGolang Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://studygolang.com
// Author:polaris <EMAIL>
package service_test
import (
. "service"
"testing"
)
// TestFindNewBlogs checks that FindNewBlogs returns at least one blog and
// logs every returned entry for inspection.
func TestFindNewBlogs(t *testing.T) {
	blogList := FindNewBlogs()
	if len(blogList) == 0 {
		// Fix: replace the placeholder "xxxx" failure message with a
		// descriptive one.
		t.Fatal("FindNewBlogs returned no blogs")
	}
	t.Log(len(blogList))
	for k, blog := range blogList {
		t.Log(k, blog)
		t.Log("===")
	}
}
|
#!/usr/bin/env bash
# Clone (on first run) and build coq-serapi at a pinned branch.
set -euv

branch="v8.9"

# Clone only if we do not already have a git checkout.
if [ ! -d coq-serapi/.git ]; then
    git clone https://github.com/ejgallego/coq-serapi.git
fi

cd coq-serapi
# Quote expansions so an unexpected value cannot word-split.
git fetch origin "$branch"
git checkout "$branch"

# Slower:
# SERAPI_COQ_HOME="$PWD/../coq/" COQBIN="../coq/bin" (make clean; make)
#SERAPI_COQ_HOME="$PWD/../coq/" COQBIN="../coq/bin" make build
make build
#OCAMLPATH=$OCAMLPATH SERAPI_COQ_HOME="$PWD/../coq/" COQBIN="../coq/bin" make sertop
#!/bin/bash
# Check (or, with --fix, rewrite) Go source formatting using `gofmt -s`.
# vendor/ is excluded by listing only our own source paths.
# Use a bash array instead of relying on word-splitting of an unquoted scalar.
SOURCES=(./oragono.go ./irc)

if [ "$1" = "--fix" ]; then
    exec gofmt -s -w "${SOURCES[@]}"
fi

# `gofmt -l` lists files that would change; non-empty output means at least
# one file is mis-formatted.
if [ -n "$(gofmt -s -l "${SOURCES[@]}")" ]; then
    echo "Go code is not formatted correctly with \`gofmt -s\`:"
    gofmt -s -d "${SOURCES[@]}"
    exit 1
fi
import zlib
import base64
import urllib.parse
def encode_saml_request(StringSAMLRequest):
    """Deflate, base64-encode and URL-quote a SAML request string.

    Follows the SAML HTTP-Redirect binding convention: zlib-compress the
    request, strip the 2-byte zlib header and 4-byte Adler-32 trailer to
    obtain a raw DEFLATE stream, then base64- and percent-encode it.
    """
    compressed = zlib.compress(StringSAMLRequest.encode())
    # Drop the zlib wrapper: bytes [0:2] are the header, the last 4 the checksum.
    raw_deflate = compressed[2:-4]
    b64_payload = base64.b64encode(raw_deflate)
    return urllib.parse.quote_plus(b64_payload)
def decode_saml_request(encodedSAMLRequest):
    """Inverse of encode_saml_request: URL-unquote, base64-decode, inflate.

    The payload is a raw DEFLATE stream (zlib header stripped by the
    encoder), so decompression must use wbits=-15. The original call used
    the default wbits, which expects a zlib header and fails with
    "Error -3 ... incorrect header check" on data produced by
    encode_saml_request.
    """
    decoded_string = urllib.parse.unquote_plus(encodedSAMLRequest)
    # Appending '==' guarantees sufficient base64 padding; b64decode ignores
    # surplus characters after valid padding in non-strict mode.
    raw = base64.b64decode(decoded_string + '==')
    return zlib.decompress(raw, -15).decode()
def main():
    """Interactive entry point: prompt for a mode, then encode or decode."""
    choice = input('For encoding choose 1, for decoding choose 2: ')
    if choice == '1':
        result = encode_saml_request(input('SAMLRequest: '))
        print('Encoded SAMLRequest:', result)
    elif choice == '2':
        result = decode_saml_request(input('Encoded SAMLRequest: '))
        print('Decoded SAMLRequest:', result)
    else:
        print('Invalid choice. Please choose 1 for encoding or 2 for decoding.')


if __name__ == "__main__":
    main()
|
package com.yoga.admin.shiro;
import org.apache.shiro.spring.web.ShiroFilterFactoryBean;
import org.apache.shiro.web.filter.mgt.FilterChainManager;
import org.apache.shiro.web.filter.mgt.PathMatchingFilterChainResolver;
import org.apache.shiro.web.servlet.AbstractShiroFilter;
import org.springframework.beans.factory.annotation.Autowired;
public class FilterFactoryBean extends ShiroFilterFactoryBean {

    @Autowired
    private OperatorShiroFilter filter;

    /**
     * Builds the Shiro filter instance by wiring the autowired
     * OperatorShiroFilter to a path-matching chain resolver backed by this
     * factory's filter chain definitions.
     */
    @Override
    protected AbstractShiroFilter createInstance() throws Exception {
        FilterChainManager chainManager = createFilterChainManager();

        PathMatchingFilterChainResolver resolver = new PathMatchingFilterChainResolver();
        resolver.setFilterChainManager(chainManager);

        filter.setFilterChainResolver(resolver);
        return filter;
    }
}
|
<reponame>hafizalfaza/rrssrbp
/* @flow */
import React from 'react'
import { Link } from 'react-router-dom'
import { Helmet } from 'react-helmet'
import { connect } from 'react-redux';
import { usersAction } from '../../actions/usersAction';
import { loadUser } from '../../actions/usersAction';
import Page from 'universal/components/Page'
import Center from 'universal/components/Center'
import Text from 'universal/components/Text'
import Flexbox from 'universal/components/Flexbox'
// Landing page component: renders a title, a link to the not-found route,
// a button that dispatches the users action, and the current
// `users.test` value supplied from the Redux store via props.
export const IndexPage = (props) => {
  return (
    <Page backgroundColor={'#03A9F4'}>
      <Helmet>
        <title>Index Page</title>
      </Helmet>
      <Center>
        <Text fontSize={'10vw'}>Index Page</Text>
      </Center>
      <Center>
        <Link to={'/not-found'}>
          <Text fontSize={'5vw'}>Go to Not Found Page</Text>
        </Link>
        <button onClick={() => props.usersAction()}>click me</button>
        <div>{props.users.test}</div>
      </Center>
      <Flexbox backgroundColor="red"></Flexbox>
    </Page>
  )
}
// Server-side data preload hook: dispatch the initial user fetch so the
// store is populated before rendering.
export function loadData(store) {
  const action = loadUser();
  store.dispatch(action);
}
// Expose the `users` slice of the store as a component prop.
function mapStateToProps(state) {
  const { users } = state;
  return { users };
}
// Bind the usersAction creator to the store's dispatch.
function mapDispatchToProps(dispatch) {
  return {
    usersAction: () => { dispatch(usersAction()); },
  };
}
// Connected component used on the client; the raw IndexPage is also
// exported above for testing / server use.
export default connect(mapStateToProps, mapDispatchToProps)(IndexPage)
|
package com.serifgungor.volleyloginregister.Activity;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.widget.ListView;
import com.android.volley.AuthFailureError;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.toolbox.StringRequest;
import com.android.volley.toolbox.Volley;
import com.serifgungor.volleyloginregister.Adapter.PaylasimAdapter;
import com.serifgungor.volleyloginregister.Model.Paylasim;
import com.serifgungor.volleyloginregister.R;
import java.util.ArrayList;
import java.util.Map;
public class LoggedActivity extends AppCompatActivity {
PaylasimAdapter adapter;
ArrayList<Paylasim> paylasim = new ArrayList<>();
ListView listView;
RequestQueue queue;
public void paylasimlariListele(){
StringRequest stringRequest = new StringRequest(
Request.Method.POST,
"http://10.1.9.14:8081/android2_gun1/paylasim_listele.php",
new Response.Listener<String>() {
@Override
public void onResponse(String response) {
Log.d("RESPONSE",response);
}
},
new Response.ErrorListener() {
@Override
public void onErrorResponse(VolleyError error) {
}
}
){
@Override
protected Map<String, String> getParams() throws AuthFailureError {
return super.getParams();
}
};
queue.add(stringRequest);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_logged);
queue = Volley.newRequestQueue(getApplicationContext());
listView = findViewById(R.id.listViewPaylasimlar);
/*
paylasim.add(new Paylasim(1,"Tarih","İçerik","Email"));
paylasim.add(new Paylasim(1,"Tarih","İçerik","Email"));
paylasim.add(new Paylasim(1,"Tarih","İçerik","Email"));
paylasim.add(new Paylasim(1,"Tarih","İçerik","Email"));
adapter = new PaylasimAdapter(paylasim,this);
listView.setAdapter(adapter);
*/
paylasimlariListele();
}
}
|
#! /bin/bash -e
# Aggregate per-run perf-stat CSV outputs from a results directory into a
# single "<results-dir>.csv" with workload/background/duration/repetition
# columns prepended to each data row.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

if [ $# != 1 ] ; then
    echo "Usage: $0 <path to results directory>" >& 2
    exit 1
else
    STATDIR="$(cd "$1" > /dev/null 2>&1 && pwd)"
    AGGREGATEOUT="${STATDIR}.csv"
    if [ ! -d "${STATDIR}" ] ; then
        echo "ERROR: failed to find directory \"${STATDIR}\"" >& 2
        exit 1
    elif ! rm -f "${AGGREGATEOUT}" || ! touch "${AGGREGATEOUT}" ; then
        echo "ERROR: failed to create new \"${AGGREGATEOUT}\"" >& 2
        exit 1
    fi
fi

# The include file defines STRESSNG_FUNC_ARRAY, the list of workload names.
if [ ! -r "${SCRIPT_DIR}/stress-ng-wrap-list.sh.include" ] ; then
    echo "ERROR: failed to find \"stress-ng-wrap-list.sh.include\"" >&2
    exit 1
else
    source "${SCRIPT_DIR}/stress-ng-wrap-list.sh.include"
    # Fix: use the array element count. The original `[ -z ${ARRAY} ]`
    # expands only the first element (and unquoted at that), so an array
    # whose first entry is empty would wrongly pass/fail the check.
    if [ "${#STRESSNG_FUNC_ARRAY[@]}" -eq 0 ] ; then
        echo "ERROR: failed to find non-empty \"STRESSNG_FUNC_ARRAY\"" >&2
        exit 1
    fi
fi

REPETITION=6
DURATION_MIN=5
DURATION_MAX=14

# Use the first stat file's header as the reference every other file must match.
FIRST_STATFILE="${STATDIR}/${STRESSNG_FUNC_ARRAY[0]}-${DURATION_MIN}sec-1.csv"
if [ ! -r "${FIRST_STATFILE}" ] ; then
    echo "ERROR: failed to find readable file \"${FIRST_STATFILE}\"" >& 2
    exit 1
else
    REFERENCE_HEADER="$(head -n 1 "${FIRST_STATFILE}")"
    echo "Workload, Background, TargetDuration, Repetition, ${REFERENCE_HEADER}" >> "${AGGREGATEOUT}"
fi

background="system-noise"
# For each workload
for workload in "${STRESSNG_FUNC_ARRAY[@]}" ; do
    # For each duration
    for d in $( seq "${DURATION_MIN}" 1 "${DURATION_MAX}" ) ; do
        # For each repetition
        for r in $( seq 1 1 "${REPETITION}" ) ; do
            # Check the processed perf-stat output exists and its header
            # matches, then append its data row to the aggregate.
            PERFSTATOUTFILE="${STATDIR}/${workload}-${d}sec-${r}.csv"
            if [ ! -r "${PERFSTATOUTFILE}" ] ; then
                echo "ERROR: failed to find readable file \"${PERFSTATOUTFILE}\"" >& 2
                exit 1
            else
                STATFILEHEADER="$(head -n 1 "${PERFSTATOUTFILE}")"
                if [ "${STATFILEHEADER}" != "${REFERENCE_HEADER}" ] ; then
                    echo "ERROR: mis-matched header for \"${PERFSTATOUTFILE}\"" >& 2
                    exit 1
                else
                    STATFILEDATA="$(tail -n 1 "${PERFSTATOUTFILE}")"
                    echo "${workload}, ${background}, ${d}, ${r}, ${STATFILEDATA}" >> "${AGGREGATEOUT}"
                fi
            fi
        done
    done
done
|
import torchvision.transforms as transforms
from torchvision import datasets
class ImageDatasetLoader:
    """Loads an ImageFolder-style dataset with fixed preprocessing."""

    def __init__(self, root):
        # Directory containing one sub-folder of images per class.
        self.root = root

    def _load_dataset(self):
        """Build and return the transformed ImageFolder dataset."""
        # 84x84 centre crop, tensor conversion, then channel-wise
        # normalisation with the mean/std constants below.
        preprocessing = transforms.Compose([
            transforms.CenterCrop(84),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        return datasets.ImageFolder(root=self.root, transform=preprocessing)
|
<filename>EliminacaoGaussiana.py
# Algoritimo 06 - Eliminação de Gauss (Retrosubstituição)
# Augmented matrix [A | b] of the 3x3 linear system solved below.
M = [
    [1, 2, 3, 1],
    [-1, 2, 1, 2],
    [1, -2, 1, 3],
]
def showMatrix(M):
    """Print the matrix, one row per line, followed by a blank line."""
    for row in M:
        print(row, sep=' ')
    print()
def showVector(V):
    """Print each component as ' x<n>: <value>' on a single line."""
    for idx, value in enumerate(V, start=1):
        print(f" x{idx}:", value, end=' ')
    print()
def elmGau(M):
    """Reduce the augmented matrix M (n rows, n+1 columns) in place to
    upper-triangular form by Gaussian elimination, and return it.

    No pivoting/row-swapping is performed: a zero pivot prints a message
    and stops early, leaving M only partially reduced.

    Fix: the original also built an `xn` list of row multipliers that was
    never returned or used; that dead accumulator has been removed.
    """
    n = len(M)
    for k in range(n):
        if M[k][k] == 0:
            print('Elemento nulo na posição pivotal.')
            break
        for i in range(k + 1, n):
            # Multiplier that zeroes M[i][k] when m*row_k is added to row_i.
            m = -M[i][k] / M[k][k]
            for j in range(k, n + 1):  # n+1 columns include the RHS column
                M[i][j] = M[i][j] + m * M[k][j]
    return M
def retSub(M):
    """Back-substitute on an upper-triangular augmented matrix and return
    the solution vector [x1, ..., xn]."""
    n = len(M)
    solution = n * [0]
    for i in reversed(range(n)):
        # Contribution of the already-solved variables to row i.
        acc = sum(M[i][j] * solution[j] for j in range(i + 1, n))
        solution[i] = (M[i][n] - acc) / M[i][i]
    return solution
# Driver: show the system, triangularise it, then solve by back-substitution.
print('# Matriz Inicial:')
showMatrix(M)

print('# Matriz Triangular:')
M_triangular = elmGau(M)
showMatrix(M_triangular)

# elmGau mutates M in place, so M and M_triangular are the same object.
print('# Vetor de Coeficientes:')
M_coeficientes = retSub(M_triangular)
showVector(M_coeficientes)
|
package math;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
public class Boj11006 {

    public static final String NEW_LINE = "\n";
    public static final String SPACE = " ";

    /**
     * Reads T test cases, each a pair of integers N and M, and for every
     * case prints (2*M - N) and (M - (2*M - N)) separated by a space.
     */
    public static void main(String[] args) throws Exception {
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        StringBuilder out = new StringBuilder();

        int cases = Integer.parseInt(reader.readLine());
        while (cases-- > 0) {
            StringTokenizer tokens = new StringTokenizer(reader.readLine(), " ");
            int n = Integer.parseInt(tokens.nextToken());
            int m = Integer.parseInt(tokens.nextToken());

            int legs = (m * 2) - n;
            int saved = m - legs;
            out.append(legs).append(SPACE).append(saved).append(NEW_LINE);
        }
        System.out.println(out.toString());
    }
}
|
#!/usr/bin/env bash
# Compile Params.hs with GHC in the current directory.
ghc Params.hs
|
const Menu = require('../menu');

/** Menu for the configuration section of the application. */
class ConfigMenu extends Menu {
  constructor() {
    super('Configurações', 'configs_menu');
  }
}

module.exports = ConfigMenu;
|
# Evaluate the 1024+0+512-SS-N language model on the WikiText-103 validation
# set, applying the noun/verb sentence-shuffle augmentation and the
# penultimate-sixth evaluation function.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-SS-N/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-SS-N/512+512+512-SS-N-VB-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_sentences_remove_all_but_nouns_and_verbs_first_third_sixth --eval_function penultimate_sixth_eval
|
<filename>packages/sitecore-jss-rendering-host/src/ssrMiddleware.ts
import { IncomingMessage, ServerResponse } from 'http';
import zlib from 'zlib'; // node.js standard lib
/** Node-style render callback: first arg the error (falsy on success), second the rendered result. */
export type RenderCallback = (errorValue: Error, successValue?: any) => void;

/** App-provided render entry point; must invoke `callback` when rendering completes. */
export type RenderFunction = (callback: RenderCallback, ...args: any[]) => void;

/** Everything needed to invoke the app's server-side render for one request. */
export interface AppInvocationInfo {
  renderFunction: RenderFunction;
  renderFunctionArgs: any[];
  /** Optional custom completion callback; when absent the default HTTP responder is used. */
  renderFunctionCallback?: RenderCallback;
}

/** Maps a parsed request body (plus req/res) to the app's render invocation. */
export type AppInvocationInfoResolver = (
  bodyJson: string,
  req: IncomingMessage,
  res: ServerResponse
) => AppInvocationInfo;

export interface SSRMiddlewareOptions {
  appInvocationInfoResolver: AppInvocationInfoResolver;
}

/** Connect/express-compatible middleware signature. */
export type WebServerMiddleware = (
  req: IncomingMessage,
  res: ServerResponse,
  next?: (err?: any) => void
) => void;

export type SSRMiddleware = (options: SSRMiddlewareOptions) => WebServerMiddleware;
// don't assume this middleware will always be used by WebpackDevServer
// it may also be used by a "standalone" JSS rendering host / express server.

/**
 * Creates middleware that reads a JSON request body, resolves the app's
 * render invocation from it, and runs the render. Failures are routed to
 * the render callback when one was resolved, otherwise to the default
 * HTTP error responder.
 */
export const ssrMiddleware: SSRMiddleware = ({
  appInvocationInfoResolver,
}: SSRMiddlewareOptions) => {
  if (!appInvocationInfoResolver) {
    throw new Error(`No AppInvocationInfo resolver was provided for SSR middleware`);
  }

  return (req: IncomingMessage, res: ServerResponse) => {
    // May remain unassigned if body parsing or resolution fails.
    let callback: RenderCallback | undefined;

    readRequestBodyAsJson(req)
      .then((bodyJson: any) => {
        if (!bodyJson) {
          throw new Error(`Request body was not JSON: ${req.url}`);
        }
        const invocationInfo = appInvocationInfoResolver(bodyJson, req, res);
        callback = invocationInfo.renderFunctionCallback || getDefaultAppRendererCallback(res);
        invocationInfo.renderFunction(callback, ...invocationInfo.renderFunctionArgs);
      })
      .catch((err) => {
        console.error(err);
        // Bug fix: `callback` is undefined when the failure happened before
        // it was assigned (e.g. a body-parse error), and calling it crashed
        // with a TypeError. Fall back to the default responder instead.
        const cb = callback || getDefaultAppRendererCallback(res);
        cb(err, null);
      });
  };
};
// todo: add hook for modifying html / response before end

/**
 * Default render completion handler: writes the render result to the HTTP
 * response — errors as a 500 JSON payload, strings as text/plain, and any
 * other value JSON-serialized as application/json.
 */
export function getDefaultAppRendererCallback(res: ServerResponse) {
  const callback: RenderCallback = (errorValue: Error, successValue?: any) => {
    if (errorValue) {
      respondWithError(res, errorValue);
    } else if (typeof successValue !== 'string') {
      // Arbitrary object/number/etc - JSON-serialize it.
      // Fix: was initialized to `{}` although it always holds a string;
      // the `{}` type silently accepted the later string assignment.
      let successValueJson = '';
      try {
        successValueJson = JSON.stringify(successValue);
      } catch (ex) {
        // JSON serialization error (e.g. circular reference) - pass it
        // back to the http caller.
        respondWithError(res, ex);
        return;
      }
      res.setHeader('Content-Type', 'application/json');
      res.end(successValueJson);
    } else {
      // String - can bypass JSON-serialization altogether
      res.setHeader('Content-Type', 'text/plain');
      res.end(successValue);
    }
  };
  return callback;
}
/**
 * Buffers the request body and resolves with the parsed JSON (or null when
 * the body is not valid JSON). Honors gzip/deflate content-encoding.
 */
export function readRequestBodyAsJson(request: IncomingMessage) {
  const dataWriter = { output: Buffer.from('') };
  request.on('data', onReadableStreamDataHandler(dataWriter));

  return new Promise((resolve, reject) => {
    request.on('end', () => {
      const contentEncoding = request.headers['content-encoding'];
      extractJsonFromStreamData(dataWriter.output, contentEncoding).then(resolve, reject);
    });
  });
}
/**
 * Logs the error and sends it as a 500 response whose body is a JSON
 * object of `{ errorMessage, errorDetails }`.
 */
export function respondWithError(res: ServerResponse, errorValue: Error) {
  console.error(errorValue);

  const payload = {
    errorMessage: errorValue.message || errorValue,
    errorDetails: errorValue.stack || null,
  };

  res.statusCode = 500;
  res.end(JSON.stringify(payload));
}
/**
 * Returns a 'data' event handler that appends each incoming chunk
 * (Buffer or string) to `dataWriter.output`.
 */
export function onReadableStreamDataHandler(dataWriter: { output: Buffer }) {
  return (data: any) => {
    const chunk = Buffer.isBuffer(data) ? data : Buffer.from(data);
    dataWriter.output = Buffer.concat([dataWriter.output, chunk]);
  };
}
/**
 * Decodes the raw request body (inflating it when `contentEncoding`
 * mentions gzip or deflate) and parses it as JSON via `tryParseJson`.
 */
export function extractJsonFromStreamData(data: Buffer, contentEncoding?: string): Promise<object> {
  let responseString: Promise<string>;

  const isCompressed =
    contentEncoding &&
    (contentEncoding.indexOf('gzip') !== -1 || contentEncoding.indexOf('deflate') !== -1);

  if (isCompressed) {
    responseString = new Promise((resolve, reject) => {
      zlib.unzip(data, (error, result) => {
        if (error) {
          // Fix: return after rejecting so we do not fall through and
          // inspect `result` on a promise that is already settled.
          reject(error);
          return;
        }
        if (result) {
          resolve(result.toString('utf-8'));
        }
      });
    });
  } else {
    responseString = Promise.resolve(data.toString('utf-8'));
  }

  return responseString.then(tryParseJson);
}
/**
 * Parses a JSON string and returns the resulting value when it is
 * object-like (object or array); returns null for primitives and for
 * invalid JSON (which is also logged).
 */
export function tryParseJson(jsonString: string) {
  try {
    const parsed = JSON.parse(jsonString);
    // Primitives (numbers, strings, booleans, null) do not count.
    if (parsed !== null && typeof parsed === 'object') {
      return parsed;
    }
  } catch (e) {
    console.error(`error parsing json string '${jsonString}'`, e);
  }
  return null;
}
|
<gh_stars>0
/**
* This program and the accompanying materials
* are made available under the terms of the License
* which accompanies this distribution in the file LICENSE.txt
*/
package com.archimatetool.csv.export;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import org.eclipse.emf.ecore.EObject;
import com.archimatetool.csv.CSVConstants;
import com.archimatetool.editor.utils.StringUtils;
import com.archimatetool.model.FolderType;
import com.archimatetool.model.IAccessRelationship;
import com.archimatetool.model.IArchimateConcept;
import com.archimatetool.model.IArchimateElement;
import com.archimatetool.model.IArchimateModel;
import com.archimatetool.model.IArchimateRelationship;
import com.archimatetool.model.IAssociationRelationship;
import com.archimatetool.model.IFolder;
import com.archimatetool.model.IInfluenceRelationship;
import com.archimatetool.model.IJunction;
import com.archimatetool.model.IProperty;
/**
 * CSV Exporter
 *
 * Writes an ArchiMate model out as CSV. Elements, relations and
 * properties are always written as a matched set of files (see
 * fWriteEmptyFile below).
 *
 * @author <NAME>
 */
public class CSVExporter implements CSVConstants {

    // Field delimiter placed between CSV values; comma by default.
    private char fDelimiter = ',';

    // Optional prefix prepended to the generated file names.
    private String fFilePrefix = ""; //$NON-NLS-1$

    // When true, CR/LF sequences in exported text are collapsed to spaces.
    private boolean fStripNewLines = false;

    // Excel-compat hack: wrap values with a leading space or "0" as ="...".
    // See http://www.creativyst.com/Doc/Articles/CSV/CSV01.htm#CSVAndExcel
    private boolean fUseLeadingCharsHack = false;

    // Output encoding; a name containing "BOM" triggers writing a BOM char.
    private String fEncoding = "UTF-8"; //$NON-NLS-1$

    /*
     * Internal option. BUT...
     * If one exports to the csv files with a model that has properties, then edits the model and removes all properties,
     * and re-exports to CSV. The original "properties.csv" file would still exist, containing orphans.
     * So it seems better to export all three to be on the safe side, even if one is empty.
     * This way we can be sure that elements, relations, and properties are always a matched tuple.
     */
    private boolean fWriteEmptyFile = true;

    // The model being exported.
    private IArchimateModel fModel;

    public CSVExporter(IArchimateModel model) {
        fModel = model;
    }
    /**
     * Export the model into the given folder as three CSV files:
     * elements, relations and properties.
     */
    public void export(File folder) throws IOException {
        writeModelAndElements(new File(folder, createElementsFileName()));
        writeRelationships(new File(folder, createRelationsFileName()));
        writeProperties(new File(folder, createPropertiesFileName()));
    }
    /**
     * Set the delimiter character.
     * Default is the comma ","
     * @param delimiter
     */
    public void setDelimiter(char delimiter) {
        fDelimiter = delimiter;
    }

    /**
     * Set the prefix to use on file names. A null value is ignored.
     * @param prefix
     */
    public void setFilePrefix(String prefix) {
        if(prefix != null) {
            fFilePrefix = prefix;
        }
    }

    /**
     * If set, newline characters in exported text are replaced by spaces.
     */
    public void setStripNewLines(boolean set) {
        fStripNewLines = set;
    }

    /**
     * If set, values starting with a space or "0" are wrapped in the
     * Excel-compatible ="..." form (see needsLeadingCharHack).
     */
    public void setUseLeadingCharsHack(boolean set) {
        fUseLeadingCharsHack = set;
    }

    /**
     * Set the output encoding; an encoding name containing "BOM" causes a
     * byte order mark to be written at the start of each file.
     */
    public void setEncoding(String encoding) {
        fEncoding = encoding;
    }
/**
* Write the Model and All Elements
*/
private void writeModelAndElements(File file) throws IOException {
Writer writer = createOutputStreamWriter(file);
// Write BOM
writeBOM(writer);
// Write Header
String header = createHeader(MODEL_ELEMENTS_HEADER);
writer.write(header);
// CRLF
writer.write(CRLF);
// Write Model
String modelRow = createModelRow();
writer.write(modelRow);
// Write Elements
writeElementsInFolder(writer, fModel.getFolder(FolderType.STRATEGY));
writeElementsInFolder(writer, fModel.getFolder(FolderType.BUSINESS));
writeElementsInFolder(writer, fModel.getFolder(FolderType.APPLICATION));
writeElementsInFolder(writer, fModel.getFolder(FolderType.TECHNOLOGY));
writeElementsInFolder(writer, fModel.getFolder(FolderType.MOTIVATION));
writeElementsInFolder(writer, fModel.getFolder(FolderType.IMPLEMENTATION_MIGRATION));
writeElementsInFolder(writer, fModel.getFolder(FolderType.OTHER));
writer.close();
}
    /**
     * Write all elements in a given folder and its child folders to Writer.
     * Only elements are written (relationships found in the tree are
     * skipped), sorted by class name then element name.
     */
    private void writeElementsInFolder(Writer writer, IFolder folder) throws IOException {
        if(folder == null) {
            return;
        }

        List<IArchimateConcept> concepts = getConcepts(folder);
        sort(concepts);

        for(IArchimateConcept concept : concepts) {
            if(concept instanceof IArchimateElement) {
                writer.write(CRLF);
                writer.write(createElementRow((IArchimateElement)concept));
            }
        }
    }
/**
* Write All Relationships
*/
private void writeRelationships(File file) throws IOException {
List<IArchimateConcept> concepts = getConcepts(fModel.getFolder(FolderType.RELATIONS));
sort(concepts);
// Are there any to write?
if(!fWriteEmptyFile && concepts.isEmpty()) {
return;
}
Writer writer = createOutputStreamWriter(file);
// Write BOM
writeBOM(writer);
// Write Header
String header = createHeader(RELATIONSHIPS_HEADER);
writer.write(header);
// Write Relationships
for(IArchimateConcept concept : concepts) {
if(concept instanceof IArchimateRelationship) {
writer.write(CRLF);
writer.write(createRelationshipRow((IArchimateRelationship)concept));
}
}
writer.close();
}
/**
* Write All Properties
*/
private void writeProperties(File file) throws IOException {
// Are there any to write?
if(!fWriteEmptyFile && !hasProperties()) {
return;
}
Writer writer = createOutputStreamWriter(file);
// Write BOM
writeBOM(writer);
// Write Header
String header = createHeader(PROPERTIES_HEADER);
writer.write(header);
// Write Model Properties
for(IProperty property : fModel.getProperties()) {
writer.write(CRLF);
writer.write(createPropertyRow(fModel.getId(), property));
}
// Write Element and Relationship Properties
for(Iterator<EObject> iter = fModel.eAllContents(); iter.hasNext();) {
EObject eObject = iter.next();
if(eObject instanceof IArchimateConcept) {
IArchimateConcept concept = (IArchimateConcept)eObject;
for(IProperty property : concept.getProperties()) {
writer.write(CRLF);
writer.write(createPropertyRow(concept.getId(), property));
}
// Write special attributes as properties
writeSpecialProperties(writer, concept);
}
}
writer.close();
}
    /**
     * Export concept-specific attributes (influence strength, access type,
     * association direction, junction type) as additional property rows.
     */
    private void writeSpecialProperties(Writer writer, IArchimateConcept concept) throws IOException {
        // Influence relationship strength
        if(concept instanceof IInfluenceRelationship) {
            String strength = ((IInfluenceRelationship)concept).getStrength();
            if(StringUtils.isSet(strength)) {
                writer.write(CRLF);
                writer.write(createPropertyRow(concept.getId(), INFLUENCE_STRENGTH, strength));
            }
        }
        // Access relationship type
        else if(concept instanceof IAccessRelationship) {
            writer.write(CRLF);
            writer.write(createPropertyRow(concept.getId(), ACCESS_TYPE, ACCESS_TYPES.get(((IAccessRelationship)concept).getAccessType())));
        }
        // Association relationship directed flag
        else if(concept instanceof IAssociationRelationship) {
            writer.write(CRLF);
            writer.write(createPropertyRow(concept.getId(), ASSOCIATION_DIRECTED,
                    ((IAssociationRelationship)concept).isDirected() ? "true" : "false")); //$NON-NLS-1$ //$NON-NLS-2$
        }
        // Junction Type (anything other than the AND type is written as OR)
        else if(concept instanceof IJunction) {
            String type = ((IJunction)concept).getType();
            if(IJunction.AND_JUNCTION_TYPE.equals(type)) {
                type = JUNCTION_AND;
            }
            else {
                type = JUNCTION_OR;
            }
            writer.write(CRLF);
            writer.write(createPropertyRow(concept.getId(), JUNCTION_TYPE, type));
        }
    }
/**
* @return true if the model has any user properties
*/
boolean hasProperties() {
if(!fModel.getProperties().isEmpty()) {
return true;
}
for(Iterator<EObject> iter = fModel.eAllContents(); iter.hasNext();) {
EObject eObject = iter.next();
if(eObject instanceof IArchimateConcept) {
IArchimateConcept concept = (IArchimateConcept)eObject;
if(!concept.getProperties().isEmpty()) {
return true;
}
}
}
return false;
}
/**
* Create a Header from given string elements
*/
String createHeader(String[] elements) {
StringBuffer sb = new StringBuffer();
for(int i = 0; i < elements.length; i++) {
String s = elements[i];
sb.append("\""); //$NON-NLS-1$
sb.append(s);
sb.append("\""); //$NON-NLS-1$
if(i < elements.length - 1) {
sb.append(fDelimiter);
}
}
return sb.toString();
}
/**
* Create a String Row for the Archimate Model
*/
String createModelRow() {
StringBuffer sb = new StringBuffer();
String id = fModel.getId();
sb.append(surroundWithQuotes(id));
sb.append(fDelimiter);
sb.append(surroundWithQuotes(ARCHIMATE_MODEL_TYPE));
sb.append(fDelimiter);
String name = normalise(fModel.getName());
sb.append(surroundWithQuotes(name));
sb.append(fDelimiter);
String purpose = normalise(fModel.getPurpose());
sb.append(surroundWithQuotes(purpose));
return sb.toString();
}
/**
* Create a String Row for an Element
*/
String createElementRow(IArchimateElement element) {
StringBuffer sb = new StringBuffer();
String id = element.getId();
sb.append(surroundWithQuotes(id));
sb.append(fDelimiter);
sb.append(surroundWithQuotes(element.eClass().getName()));
sb.append(fDelimiter);
String name = normalise(element.getName());
sb.append(surroundWithQuotes(name));
sb.append(fDelimiter);
String documentation = normalise(element.getDocumentation());
sb.append(surroundWithQuotes(documentation));
return sb.toString();
}
/**
* Create a String Row for a Relationship
*/
String createRelationshipRow(IArchimateRelationship relationship) {
StringBuffer sb = new StringBuffer();
String id = relationship.getId();
sb.append(surroundWithQuotes(id));
sb.append(fDelimiter);
sb.append(surroundWithQuotes(relationship.eClass().getName()));
sb.append(fDelimiter);
String name = normalise(relationship.getName());
sb.append(surroundWithQuotes(name));
sb.append(fDelimiter);
String documentation = normalise(relationship.getDocumentation());
sb.append(surroundWithQuotes(documentation));
sb.append(fDelimiter);
if(relationship.getSource() != null) {
String sourceID = relationship.getSource().getId();
sb.append(surroundWithQuotes(sourceID));
}
else {
sb.append("\"\""); //$NON-NLS-1$
}
sb.append(fDelimiter);
if(relationship.getTarget() != null) {
String targetID = relationship.getTarget().getId();
sb.append(surroundWithQuotes(targetID));
}
else {
sb.append("\"\""); //$NON-NLS-1$
}
return sb.toString();
}
    /**
     * Create a CSV row for a Property: concept id, key, value.
     */
    String createPropertyRow(String conceptID, IProperty property) {
        return createPropertyRow(conceptID, property.getKey(), property.getValue());
    }
/**
* Create a String Row for a Key/Value
*/
String createPropertyRow(String conceptID, String key, String value) {
StringBuffer sb = new StringBuffer();
sb.append(surroundWithQuotes(conceptID));
sb.append(fDelimiter);
key = normalise(key);
sb.append(surroundWithQuotes(key));
sb.append(fDelimiter);
value = normalise(value);
sb.append(surroundWithQuotes(value));
return sb.toString();
}
    /**
     * Write a BOM character to the file when the configured encoding name
     * mentions "BOM" (e.g. "UTF-8 BOM"); the writer's charset determines
     * the actual bytes emitted for U+FEFF.
     * @param writer
     * @throws IOException
     */
    private void writeBOM(Writer writer) throws IOException {
        if(fEncoding.contains("BOM")) { //$NON-NLS-1$
            writer.write('\ufeff');
        }
    }
/**
 * Return a normalised String suitable for a CSV cell.
 * A null string is returned as an empty string.
 * CR/LF sequences become single spaces (only when the strip-newlines
 * option is enabled), TABs always become spaces, and double-quote
 * characters are escaped by doubling them.
 */
String normalise(String s) {
    if(s == null) {
        return ""; //$NON-NLS-1$
    }

    // Newlines (optional)
    if(fStripNewLines) {
        s = s.replaceAll("(\r\n|\r|\n)", " "); //$NON-NLS-1$//$NON-NLS-2$
    }

    // Tabs become a space
    s = s.replace("\t", " "); //$NON-NLS-1$ //$NON-NLS-2$

    // Double quotes are escaped by doubling them (standard CSV convention).
    // (The original comment said "single quotes" but the code handles '"'.)
    s = s.replace("\"", "\"\""); //$NON-NLS-1$//$NON-NLS-2$

    return s;
}
/**
 * Surround the given string with double quotes.
 * When the leading-character hack applies (see {@link #needsLeadingCharHack}),
 * the value is emitted as a quoted ="..." formula -- presumably so
 * spreadsheet imports preserve leading spaces/zeros; confirm against the
 * export option's documentation.
 */
String surroundWithQuotes(String s) {
    if(needsLeadingCharHack(s)) {
        return "\"=\"\"" + s + "\"\"\""; //$NON-NLS-1$ //$NON-NLS-2$
    }
    return "\"" + s + "\""; //$NON-NLS-1$ //$NON-NLS-2$
}
/**
 * True when the leading-chars hack option is enabled and the string starts
 * with a space or a "0" character. Null strings never need the hack.
 */
boolean needsLeadingCharHack(String s) {
    return s != null && fUseLeadingCharsHack && (s.startsWith(" ") || s.startsWith("0")); //$NON-NLS-1$//$NON-NLS-2$
}
/**
 * Return a list of all elements/relations in a given folder and its child
 * folders (recursive, depth-first). A null folder yields an empty list.
 */
private List<IArchimateConcept> getConcepts(IFolder folder) {
    List<IArchimateConcept> list = new ArrayList<IArchimateConcept>();

    if(folder == null) {
        return list;
    }

    // Concepts directly contained in this folder
    for(EObject object : folder.getElements()) {
        if(object instanceof IArchimateConcept) {
            list.add((IArchimateConcept)object);
        }
    }

    // Recurse into sub-folders
    for(IFolder f : folder.getFolders()) {
        list.addAll(getConcepts(f));
    }

    return list;
}
/**
 * Sort a list of ArchimateElement/Relationship types.
 * Sorts by class name first, then by concept name (case-insensitive,
 * trimmed). Null-safe: a null concept name is treated as an empty string.
 */
void sort(List<IArchimateConcept> list) {
    if(list == null || list.size() < 2) {
        return;
    }

    Collections.sort(list, new Comparator<IArchimateConcept>() {
        @Override
        public int compare(IArchimateConcept o1, IArchimateConcept o2) {
            if(o1.eClass().equals(o2.eClass())) {
                // Null-guard the names *before* dereferencing them. The
                // original called getName().toLowerCase() first, which would
                // throw a NullPointerException for a concept with a null name
                // and made the safeString() wrapper useless.
                String name1 = StringUtils.safeString(o1.getName()).toLowerCase().trim();
                String name2 = StringUtils.safeString(o2.getName()).toLowerCase().trim();
                return name1.compareTo(name2);
            }

            String name1 = o1.eClass().getName().toLowerCase();
            String name2 = o2.eClass().getName().toLowerCase();
            return name1.compareTo(name2);
        }
    });
}
// File name for the exported elements CSV (prefix + fixed name + extension).
String createElementsFileName() {
    return fFilePrefix + ELEMENTS_FILENAME + FILE_EXTENSION;
}

// File name for the exported relations CSV.
String createRelationsFileName() {
    return fFilePrefix + RELATIONS_FILENAME + FILE_EXTENSION;
}

// File name for the exported properties CSV.
String createPropertiesFileName() {
    return fFilePrefix + PROPERTIES_FILENAME + FILE_EXTENSION;
}
/**
 * Create a writer for the given file using the user-selected encoding.
 * "ANSI" means the platform default charset; any encoding name starting
 * with "UTF-8" (which also covers "UTF-8 BOM" -- the BOM itself is written
 * separately by {@link #writeBOM}) is written as UTF-8; anything else is
 * passed through as a charset name.
 */
OutputStreamWriter createOutputStreamWriter(File file) throws IOException {
    if("ANSI".equals(fEncoding)) { //$NON-NLS-1$
        // Default encoding
        return new OutputStreamWriter(new FileOutputStream(file));
    }
    else if(fEncoding.startsWith("UTF-8")) { //$NON-NLS-1$
        return new OutputStreamWriter(new FileOutputStream(file), "UTF-8"); //$NON-NLS-1$
    }
    else {
        return new OutputStreamWriter(new FileOutputStream(file), fEncoding);
    }
}
}
|
<filename>src/api/java/net/blay09/mods/cookingforblockheads/api/ToastHandler.java
package net.blay09.mods.cookingforblockheads.api;

import net.minecraft.item.ItemStack;

/**
 * Maps an input item stack to the stack the toaster produces for it.
 */
public interface ToastHandler {
    /**
     * @param itemStack the stack being toasted
     * @return the resulting output stack
     */
    ItemStack getToasterOutput(ItemStack itemStack);
}
|
{ # Prevent execution if this script was only partially downloaded
set -Eeuo pipefail
# Log a message to stderr, prefixed with the script name ($0).
log() {
    echo "$0:" "$@" >&2
}
# Log a failure message and abort the whole script.
die() {
    log "FAILURE: $1"
    exit 1
}
# Abort unless the named command is available (checked via both the shell
# builtin `type` and `command -v`, for maximum portability).
require_util() {
    if ! type "$1" > /dev/null 2>&1 && ! command -v "$1" > /dev/null 2>&1; then
        die "This script requires '$1', which could not be found."
    fi
}
# Verify that $dest exists and is an empty directory owned by $UID with
# mode 0755; abort otherwise.
# NOTE(review): 'stat -c' is the GNU coreutils form, so this check assumes
# Linux (consistent with the aarch64-linux tarball installed below).
check_dest() {
    if [ -z "$UID" ]; then
        die "\$UID is not set."
    fi
    log "Checking for empty '$dest' with uid='$UID' mode='0755'..."
    if [ ! -e "$dest" ]; then
        log "'$dest' does not exist. Please create it (e.g. with 'sudo install -d -m 0755 -o $UID $dest') and re-run this script."
        exit 1
    fi
    # %f = raw mode in hex (41ed == directory + 0755), %u = owner uid
    dest_mode_and_uid="$(stat -c "%f %u" "$dest")"
    if [ "$dest_mode_and_uid" != "41ed $UID" ]; then # 040755
        die "'$dest' is not a directory with uid='$UID' and mode='0755'."
    fi
    if [ -n "$(ls "$dest")" ]; then
        die "'$dest' is not empty."
    fi
}
require_util date
require_util curl
require_util sha256sum
require_util tar

tarball_url=https://github.com/nspin/minimally-invasive-nix-installer/raw/dist-y64jgzkzg5/dist/min-nix-2.5pre20211007_844dd90-aarch64-linux.tar.gz
tarball_sha256=8fe9c19669faf569d7119c445fcadf97b802f5cbbc4fbfce06670ee08714de20
archive_name=min-nix-2.5pre20211007_844dd90-aarch64-linux
env_store_path=/nix/store/3vrk43n2xnsd33lhy2vxn1p1c82akm4k-min-nix-env-2.5pre20211007_844dd90
dest=/nix

check_dest

tarball_path="${TMPDIR:-/tmp}/$archive_name-$(date +%s).tar.gz"

log "Fetching '$tarball_url' to '$tarball_path'..."
# Single-quote the trap body so $tarball_path is expanded when the trap
# fires, and quote it inside so a TMPDIR containing spaces (or glob chars)
# cannot break the cleanup. The original expanded it unquoted at trap
# definition time.
trap '{ rm -f "$tarball_path"; }' EXIT
curl -fL -o "$tarball_path" "$tarball_url"

log "Verifying the integrity of '$tarball_path'..."
observed_sha256="$(sha256sum "$tarball_path" | cut -c 1-64)"
if [ "$tarball_sha256" != "$observed_sha256" ]; then
    die "SHA-256 hash mismatch for '$tarball_path': expected $tarball_sha256, got $observed_sha256."
fi

log "Unpacking '$tarball_path' into '$dest'..."
# Quote all path expansions (the original left $tarball_path and $dest
# unquoted here and below, which breaks on paths with whitespace).
tar -xz -f "$tarball_path" -C "$dest" --strip 1 --delay-directory-restore
chmod u+w "$dest/store"

log "Initializing Nix database..."
"$env_store_path/bin/nix-store" --init
"$env_store_path/bin/nix-store" --load-db < "$dest/reginfo"
rm -f "$dest/reginfo"

log "Establishing Nix environment..."
"$env_store_path/bin/nix-store" --realise --add-root "$dest/env" --indirect "$env_store_path" > /dev/null

log
log "SUCCESS"
log
log "To uninstall, simply remove '/nix'."
log "You may want to create '/etc/nix/nix.conf'."
log "Add the following to your shell init:"
log
log " export PATH=\"$dest/env/bin:\$PATH\""
log " export MANPATH=\"$dest/env/share/man:\$MANPATH\""
log " export NIX_SSL_CERT_FILE=$dest/env/etc/ssl/certs/ca-bundle.crt # or your favorite cert bundle"
log
}
|
#!/bin/bash
# Install the SDL2, SDL2_mixer and Ogg/Vorbis development libraries.
# Linux
# sudo apt-get update && sudo apt-get install libsdl2-dev libsdl2-mixer-dev libvorbis-dev
# macOS
brew install sdl2 sdl2_mixer libvorbis libogg
|
<gh_stars>1-10
import React from 'react';
import PropTypes from 'prop-types';
import {isEmpty, isNil} from 'lodash';
import LoadingIndicator from './LoadingIndicator';
import TableNullContent from './TableNullContent';
/**
 * Table wrapper that renders the appropriate state:
 * - a spinner while `loading` is true
 * - nothing at all while `entities` is null/undefined (not yet fetched)
 * - a "no results" placeholder when `entities` is empty
 * - the actual table content (children) otherwise
 */
const TableContainer = ({
    loading, entities, message, children
}) => {
    if (loading) {
        return <LoadingIndicator/>;
    }
    if (isNil(entities)) {
        return null;
    }
    if (isEmpty(entities)) {
        return <TableNullContent message={message}/>;
    }
    return children;
};

TableContainer.propTypes = {
    // Either a list of entities or a keyed map of them
    entities: PropTypes.oneOfType([
        PropTypes.arrayOf(PropTypes.shape({})),
        PropTypes.shape({})
    ]),
    loading: PropTypes.bool.isRequired,
    // Text shown when there are no results (Spanish default below)
    message: PropTypes.string
};

TableContainer.defaultProps = {
    entities: null,
    message: 'No se encontraron resultados'
};

export default TableContainer;
|
import React from 'react';
import PropTypes from 'prop-types';
import { Bjorn } from '@navikt/digisyfo-npm';
import { fjernIndexFraTag } from './fieldUtils';
import { JOBBET_DU_100_PROSENT, JOBBET_DU_GRADERT } from '../../enums/tagtyper';
import { soknadPt } from '../../../propTypes/index';
import { ARBEIDSTAKERE } from '../../enums/soknadtyper';
// Question tags that should display a "Bjørn" info box, keyed by søknad type.
const tagsMedBjorn = {
    [ARBEIDSTAKERE]: [JOBBET_DU_GRADERT, JOBBET_DU_100_PROSENT],
};

// True when this søknad type has a Bjørn text registered for the
// (index-stripped) tag.
const harBjorntekst = (tag, soknadstype) => {
    return tagsMedBjorn[soknadstype]
        && tagsMedBjorn[soknadstype].indexOf(fjernIndexFraTag(tag)) > -1;
};

// Builds the translation key for the Bjørn text from the lowercased,
// index-stripped tag.
const hentBjornNokkel = (tag) => {
    const tagUtenIndex = fjernIndexFraTag(tag);
    return `soknad.bjorn.${tagUtenIndex.toLowerCase()}`;
};

// Renders the Bjørn info box for the given question tag, or nothing when the
// tag has no registered text for this søknad type.
const SporsmalBjorn = ({ tag, className, soknad }) => {
    return harBjorntekst(tag, soknad.soknadstype)
        ? <Bjorn className={className} nokkel={hentBjornNokkel(tag)} />
        : null;
};

SporsmalBjorn.propTypes = {
    tag: PropTypes.string,
    className: PropTypes.string,
    soknad: soknadPt,
};

export default SporsmalBjorn;
|
import { exec, matches, replace } from 'utils'
import { INodeBlockquote, INodeParagraph, IParsed, ITokenizer } from 'models'
// Matches a run of lines that each start with up to three spaces and '>',
// excluding link-reference definitions nested inside the quote.
const execBlockquote =
  exec(/^( {0,3}>[^\n]+(\n(?! *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$))[^\n]+)*\n*)+/)
// Strips the leading '> ' marker from every quoted line.
const clearBlockquote = replace(/^ *> ?/gm, '')
// Cheap pre-check so the expensive regex above only runs on candidates.
const probablyBlockquote = matches(/^ {0,3}>/)

/**
 * Captures a leading blockquote from `source`, tokenizing its inner content.
 * A quote containing exactly one paragraph is unwrapped so the paragraph's
 * children become the blockquote's children directly.
 */
const captureBlockquote = (source: string, tokenize: ITokenizer): IParsed<INodeBlockquote> | null => {
  if (!probablyBlockquote(source)) {
    return null
  }
  const result = execBlockquote(source)
  if (!result) {
    return null
  }
  const capture = result[0]
  let children = tokenize(clearBlockquote(capture))
  if (children.length === 1 && children[0].type === 'paragraph') {
    children = (children[0] as INodeParagraph).children
  }
  return {
    token: {
      type: 'blockquote',
      // Bug fix: the original called tokenize(clearBlockquote(capture))
      // again here, discarding the paragraph-unwrapping computed above
      // (and doing the tokenization work twice).
      children
    },
    newSource: source.substring(capture.length)
  }
}

export { captureBlockquote }
|
#!/bin/bash
# Echo each command and stop at the first failure.
set -x
set -e

# Set up the database
echo ' ===> 1.启动 mysql...'
service mysql start
# sleep 5s
service mysql status

# Create the database and tables, then verify mysql is still running
echo ' ===> 2.创建库和表...'
bash setup-data.sh
echo '导入完毕...'
service mysql status
echo ' ===> 3.初始化完成...'
|
<gh_stars>0
import java.util.*;
import java.io.*;
public class Exercise12_13 {
public static void main (String[] args) {
File localFile = new File(args[0]);
int lines = 0;
int words = 0;
int chars = 0;
try {
Scanner fileContent = new Scanner(localFile);
while (fileContent.hasNextLine()) {
String line = fileContent.nextLine();
lines++;
chars += line.length();
}
} catch (FileNotFoundException ex) {}
try {
Scanner fileContent = new Scanner(localFile);
while (fileContent.hasNext()) {
fileContent.next();
words++;
}
} catch (FileNotFoundException ex) {}
System.out.printf("File %s has", args[0]);
System.out.printf("\n%d characters", chars);
System.out.printf("\n%d words", words);
System.out.printf("\n%d lines\n", lines);
}
}
|
#!/bin/bash
# Run from the script's own directory; quoted so a path containing spaces
# does not word-split (the original left ${0%/*} unquoted).
cd "${0%/*}"
source ../config.sh

DB_SCHEMA="s15"

[[ "$DEBUG" = 1 ]] && NODE_COMMAND=node-debug || NODE_COMMAND=node

# 0) Recreate the schema - PostgreSQL (the old comment wrongly said "mysql")
sudo -u postgres psql -c "DROP SCHEMA IF EXISTS $DB_SCHEMA CASCADE" "$DB_NAME"
sudo -u postgres psql -c "CREATE SCHEMA $DB_SCHEMA AUTHORIZATION $DB_USER;" "$DB_NAME"
sudo -u postgres psql -c "CREATE EXTENSION \"uuid-ossp\" SCHEMA $DB_SCHEMA;" "$DB_NAME"

# 1) Do stuff in Node
printf "\n\n############################# PGres #############################\n"
$NODE_COMMAND 01-populate.js
printf "#################################################################\n\n\n"
|
#!/bin/bash

# This script provides common script functions for the hacks
# Requires S2I_ROOT to be set

set -o errexit
set -o nounset
set -o pipefail

# The root of the build/dist directory
S2I_ROOT=$(
  unset CDPATH
  sti_root=$(dirname "${BASH_SOURCE}")/..
  cd "${sti_root}"
  pwd
)

# Build output locations (S2I_OUTPUT_SUBPATH overridable via environment)
S2I_OUTPUT_SUBPATH="${S2I_OUTPUT_SUBPATH:-_output/local}"
S2I_OUTPUT="${S2I_ROOT}/${S2I_OUTPUT_SUBPATH}"
S2I_OUTPUT_BINPATH="${S2I_OUTPUT}/bin"
S2I_OUTPUT_PKGDIR="${S2I_OUTPUT}/pkgdir"
S2I_LOCAL_BINPATH="${S2I_OUTPUT}/go/bin"
S2I_LOCAL_RELEASEPATH="${S2I_OUTPUT}/releases"
# Extra linker flags appended at release build time
RELEASE_LDFLAGS=${RELEASE_LDFLAGS:-""}

readonly S2I_GO_PACKAGE=github.com/openshift/source-to-image
readonly S2I_GOPATH="${S2I_OUTPUT}/go"

# Default platforms/targets for cross-compilation
readonly S2I_CROSS_COMPILE_PLATFORMS=(
  linux/amd64
  darwin/amd64
  windows/amd64
  linux/386
)
readonly S2I_CROSS_COMPILE_TARGETS=(
  cmd/s2i
)
readonly S2I_CROSS_COMPILE_BINARIES=("${S2I_CROSS_COMPILE_TARGETS[@]##*/}")
readonly S2I_ALL_TARGETS=(
  "${S2I_CROSS_COMPILE_TARGETS[@]}"
)
# Legacy binary name shipped as a symlink/copy next to s2i
readonly S2I_BINARY_SYMLINKS=(
  sti
)
readonly S2I_BINARY_RELEASE_WINDOWS=(
  sti.exe
  s2i.exe
)
# s2i::build::binaries_from_targets take a list of build targets and return the
# full go package to be built (one package path per output line).
s2i::build::binaries_from_targets() {
  local target
  for target; do
    echo "${S2I_GO_PACKAGE}/${target}"
  done
}
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
# Output format: "<GOHOSTOS>/<GOHOSTARCH>", e.g. "linux/amd64".
s2i::build::host_platform() {
  echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}
# Build binaries targets specified
#
# Input:
#   $@ - targets and go flags.  If no targets are set then all binaries targets
#     are built.
#   S2I_BUILD_PLATFORMS - Incoming variable of targets to build for.  If unset
#     then just the host architecture is built.
s2i::build::build_binaries() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    s2i::build::setup_env

    # Fetch the version.
    local version_ldflags
    version_ldflags=$(s2i::build::ldflags)

    # Populates the targets/binaries/platforms arrays used below.
    s2i::build::export_targets "$@"

    local platform
    for platform in "${platforms[@]}"; do
      s2i::build::set_platform_envs "${platform}"
      echo "++ Building go targets for ${platform}:" "${targets[@]}"
      # CGO disabled so the resulting binaries are portable.
      CGO_ENABLED=0 go install "${goflags[@]:+${goflags[@]}}" \
        -pkgdir "${S2I_OUTPUT_PKGDIR}" \
        -ldflags "${version_ldflags} ${RELEASE_LDFLAGS}" \
        "${binaries[@]}"
      s2i::build::unset_platform_envs "${platform}"
    done
  )
}
# Generates the set of target packages, binaries, and platforms to build for.
# Accepts binaries via $@, and platforms via S2I_BUILD_PLATFORMS, or defaults to
# the current platform.
#
# Sets the caller-visible variables: goflags, targets, binaries, platforms.
s2i::build::export_targets() {
  # Use eval to preserve embedded quoted strings.
  # NOTE: goflags is intentionally NOT declared 'local'. The original made it
  # local, which (bash dynamic scoping) hid it from the *caller*
  # (build_binaries reads goflags after this function returns), silently
  # dropping any flags supplied via S2I_GOFLAGS.
  eval "goflags=(${S2I_GOFLAGS:-})"

  targets=()
  local arg
  for arg; do
    if [[ "${arg}" == -* ]]; then
      # Assume arguments starting with a dash are flags to pass to go.
      goflags+=("${arg}")
    else
      targets+=("${arg}")
    fi
  done

  if [[ ${#targets[@]} -eq 0 ]]; then
    targets=("${S2I_ALL_TARGETS[@]}")
  fi

  binaries=($(s2i::build::binaries_from_targets "${targets[@]}"))

  platforms=("${S2I_BUILD_PLATFORMS[@]:+${S2I_BUILD_PLATFORMS[@]}}")
  if [[ ${#platforms[@]} -eq 0 ]]; then
    platforms=("$(s2i::build::host_platform)")
  fi
}
# Takes the platform name ($1, "GOOS/GOARCH") and sets the appropriate golang
# env variables for that platform.
s2i::build::set_platform_envs() {
  [[ -n ${1-} ]] || {
    echo "!!! Internal error. No platform set in s2i::build::set_platform_envs"
    exit 1
  }
  # Use the validated argument. The original validated $1 but then read the
  # caller's "$platform" loop variable instead, a latent bug for any caller
  # that does not happen to use that variable name.
  export GOOS=${1%/*}
  export GOARCH=${1##*/}
}
# Takes the platform name ($1) and resets the appropriate golang env variables
# for that platform. (The argument is currently unused -- GOOS/GOARCH are
# simply cleared unconditionally.)
s2i::build::unset_platform_envs() {
  unset GOOS
  unset GOARCH
}
# Create the GOPATH tree under $S2I_ROOT: a symlink at
# $S2I_GOPATH/src/$S2I_GO_PACKAGE pointing back at the repo root.
s2i::build::create_gopath_tree() {
  local go_pkg_dir="${S2I_GOPATH}/src/${S2I_GO_PACKAGE}"
  local go_pkg_basedir=$(dirname "${go_pkg_dir}")

  mkdir -p "${go_pkg_basedir}"
  rm -f "${go_pkg_dir}"

  # TODO: This symlink should be relative.
  if [[ "$OSTYPE" == "cygwin" ]]; then
    # On cygwin, create a Windows-native link via mklink instead of ln -s.
    S2I_ROOT_cyg=$(cygpath -w ${S2I_ROOT})
    go_pkg_dir_cyg=$(cygpath -w ${go_pkg_dir})
    cmd /c "mklink ${go_pkg_dir_cyg} ${S2I_ROOT_cyg}" &>/dev/null
  else
    ln -s "${S2I_ROOT}" "${go_pkg_dir}"
  fi
}
# s2i::build::setup_env will check that the `go` commands is available in
# ${PATH}. If not running on Travis, it will also check that the Go version is
# good enough for the Kubernetes build.
#
# Input Vars:
#   S2I_EXTRA_GOPATH - If set, this is included in created GOPATH
#   S2I_NO_GODEPS - If set, we don't add 'vendor' to GOPATH
#
# Output Vars:
#   export GOPATH - A modified GOPATH to our created tree along with extra
#     stuff.
#   export GOBIN - This is actively unset if already set as we want binaries
#     placed in a predictable place.
s2i::build::setup_env() {
  s2i::build::create_gopath_tree

  # NOTE(review): `which` is non-standard; `command -v go` would be the
  # portable check -- confirm before changing, since behavior is identical
  # on the supported platforms.
  if [[ -z "$(which go)" ]]; then
    cat <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    exit 2
  fi

  # Travis continuous build uses a head go release that doesn't report
  # a version number, so we skip this check on Travis.  It's unnecessary
  # there anyway.
  if [[ "${TRAVIS:-}" != "true" ]]; then
    local go_version
    go_version=($(go version))
    # String comparison, not a numeric one -- adequate for go1.x versions.
    if [[ "${go_version[2]}" < "go1.4" ]]; then
      cat <<EOF
Detected go version: ${go_version[*]}.
S2I requires go version 1.4 or greater.
Please install Go version 1.4 or later.
EOF
      exit 2
    fi
  fi

  # For any tools that expect this to be set (it is default in golang 1.6),
  # force vendor experiment.
  export GO15VENDOREXPERIMENT=1

  GOPATH=${S2I_GOPATH}

  # Append S2I_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${S2I_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${S2I_EXTRA_GOPATH}"
  fi

  # Append the tree maintained by `godep` to the GOPATH unless S2I_NO_GODEPS
  # is defined.
  if [[ -z ${S2I_NO_GODEPS:-} ]]; then
    GOPATH="${GOPATH}:${S2I_ROOT}/vendor"
  fi

  if [[ "$OSTYPE" == "cygwin" ]]; then
    GOPATH=$(cygpath -w -p $GOPATH)
  fi

  export GOPATH

  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${S2I_OUTPUT_BINDIR}
#
# If S2I_RELEASE_ARCHIVE is set to a directory, it will have tar archives of
# each S2I_RELEASE_PLATFORMS created
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# S2I_OUTPUT_BINDIR but that won't work in the face of cross compilation.  'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs.  This
# complicates pretty much everything else we do around packaging and such.
s2i::build::place_bins() {
  (
    local host_platform
    host_platform=$(s2i::build::host_platform)

    echo "++ Placing binaries"

    if [[ "${S2I_RELEASE_ARCHIVE-}" != "" ]]; then
      s2i::build::get_version_vars
      mkdir -p "${S2I_LOCAL_RELEASEPATH}"
    fi

    # Populates the targets/platforms arrays.
    s2i::build::export_targets "$@"

    for platform in "${platforms[@]}"; do
      # The substitution on platform_src below will replace all slashes with
      # underscores.  It'll transform darwin/amd64 -> darwin_amd64.
      local platform_src="/${platform//\//_}"
      # Host-platform binaries land directly in $GOPATH/bin (no subdir).
      if [[ $platform == $host_platform ]]; then
        platform_src=""
      fi

      # Skip this directory if the platform has no binaries.
      local full_binpath_src="${S2I_GOPATH}/bin${platform_src}"
      if [[ ! -d "${full_binpath_src}" ]]; then
        continue
      fi

      mkdir -p "${S2I_OUTPUT_BINPATH}/${platform}"

      # Create an array of binaries to release. Append .exe variants if the platform is windows.
      local -a binaries=()
      for binary in "${targets[@]}"; do
        binary=$(basename $binary)
        if [[ $platform == "windows/amd64" ]]; then
          binaries+=("${binary}.exe")
        else
          binaries+=("${binary}")
        fi
      done

      # Move the specified release binaries to the shared S2I_OUTPUT_BINPATH.
      for binary in "${binaries[@]}"; do
        mv "${full_binpath_src}/${binary}" "${S2I_OUTPUT_BINPATH}/${platform}/"
      done

      # If no release archive was requested, we're done.
      if [[ "${S2I_RELEASE_ARCHIVE-}" == "" ]]; then
        continue
      fi

      # Create a temporary bin directory containing only the binaries marked for release.
      local release_binpath=$(mktemp -d sti.release.${S2I_RELEASE_ARCHIVE}.XXX)
      for binary in "${binaries[@]}"; do
        cp "${S2I_OUTPUT_BINPATH}/${platform}/${binary}" "${release_binpath}/"
      done

      # Create binary copies where specified.
      local suffix=""
      if [[ $platform == "windows/amd64" ]]; then
        suffix=".exe"
      fi
      for linkname in "${S2I_BINARY_SYMLINKS[@]}"; do
        local src="${release_binpath}/s2i${suffix}"
        if [[ -f "${src}" ]]; then
          ln -s "s2i${suffix}" "${release_binpath}/${linkname}${suffix}"
        fi
      done

      # Create the release archive: zip for windows, tar.gz otherwise.
      local platform_segment="${platform//\//-}"
      if [[ $platform == "windows/amd64" ]]; then
        local archive_name="${S2I_RELEASE_ARCHIVE}-${S2I_GIT_VERSION}-${S2I_GIT_COMMIT}-${platform_segment}.zip"
        echo "++ Creating ${archive_name}"
        for file in "${S2I_BINARY_RELEASE_WINDOWS[@]}"; do
          zip "${S2I_LOCAL_RELEASEPATH}/${archive_name}" -qj "${release_binpath}/${file}"
        done
      else
        local archive_name="${S2I_RELEASE_ARCHIVE}-${S2I_GIT_VERSION}-${S2I_GIT_COMMIT}-${platform_segment}.tar.gz"
        echo "++ Creating ${archive_name}"
        tar -czf "${S2I_LOCAL_RELEASEPATH}/${archive_name}" -C "${release_binpath}" .
      fi
      rm -rf "${release_binpath}"
    done
  )
}
# s2i::build::make_binary_symlinks makes symlinks for the sti
# binary in _output/local/go/bin (a plain copy on windows, which
# has no symlinks).
s2i::build::make_binary_symlinks() {
  platform=$(s2i::build::host_platform)
  if [[ -f "${S2I_OUTPUT_BINPATH}/${platform}/s2i" ]]; then
    for linkname in "${S2I_BINARY_SYMLINKS[@]}"; do
      if [[ $platform == "windows/amd64" ]]; then
        cp "${S2I_OUTPUT_BINPATH}/${platform}/s2i.exe" "${S2I_OUTPUT_BINPATH}/${platform}/${linkname}.exe"
      else
        ln -sf s2i "${S2I_OUTPUT_BINPATH}/${platform}/${linkname}"
      fi
    done
  fi
}
# s2i::build::detect_local_release_tars verifies there is only one primary and one
# image binaries release tar in S2I_LOCAL_RELEASEPATH for the given platform specified by
# argument 1, exiting if more than one of either is found.
#
# If the tars are discovered, their full paths are exported to the following env vars:
#
#   S2I_PRIMARY_RELEASE_TAR
#   S2I_RELEASE_COMMIT
s2i::build::detect_local_release_tars() {
  local platform="$1"

  if [[ ! -d "${S2I_LOCAL_RELEASEPATH}" ]]; then
    echo "There are no release artifacts in ${S2I_LOCAL_RELEASEPATH}"
    exit 2
  fi
  if [[ ! -f "${S2I_LOCAL_RELEASEPATH}/.commit" ]]; then
    echo "There is no release .commit identifier ${S2I_LOCAL_RELEASEPATH}"
    exit 2
  fi
  # Multiple matches come back as multiple lines; the wc -l check below
  # rejects anything other than exactly one.
  local primary=$(find ${S2I_LOCAL_RELEASEPATH} -maxdepth 1 -type f -name source-to-image-*-${platform}*)
  if [[ $(echo "${primary}" | wc -l) -ne 1 ]]; then
    echo "There should be exactly one ${platform} primary tar in $S2I_LOCAL_RELEASEPATH"
    exit 2
  fi

  export S2I_PRIMARY_RELEASE_TAR="${primary}"
  export S2I_RELEASE_COMMIT="$(cat ${S2I_LOCAL_RELEASEPATH}/.commit)"
}
# s2i::build::get_version_vars loads the standard version variables as
# ENV vars. A pre-baked version file (S2I_VERSION_FILE) takes precedence
# over querying git.
s2i::build::get_version_vars() {
  if [[ -n ${S2I_VERSION_FILE-} ]]; then
    source "${S2I_VERSION_FILE}"
    return
  fi
  s2i::build::sti_version_vars
}
# s2i::build::sti_version_vars looks up the current Git vars:
# S2I_GIT_COMMIT, S2I_GIT_TREE_STATE, S2I_GIT_VERSION, S2I_GIT_MAJOR,
# S2I_GIT_MINOR. Pre-set values are respected and not overwritten.
s2i::build::sti_version_vars() {
  local git=(git --work-tree "${S2I_ROOT}")

  if [[ -n ${S2I_GIT_COMMIT-} ]] || S2I_GIT_COMMIT=$("${git[@]}" rev-parse --short "HEAD^{commit}" 2>/dev/null); then
    if [[ -z ${S2I_GIT_TREE_STATE-} ]]; then
      # Check if the tree is dirty.  default to dirty
      if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
        S2I_GIT_TREE_STATE="clean"
      else
        S2I_GIT_TREE_STATE="dirty"
      fi
    fi

    # Use git describe to find the version based on annotated tags.
    if [[ -n ${S2I_GIT_VERSION-} ]] || S2I_GIT_VERSION=$("${git[@]}" describe --tags "${S2I_GIT_COMMIT}^{commit}" 2>/dev/null); then
      if [[ "${S2I_GIT_TREE_STATE}" == "dirty" ]]; then
        # git describe --dirty only considers changes to existing files, but
        # that is problematic since new untracked .go files affect the build,
        # so use our idea of "dirty" from git status instead.
        S2I_GIT_VERSION+="-dirty"
      fi

      # Try to match the "git describe" output to a regex to try to extract
      # the "major" and "minor" versions and whether this is the exact tagged
      # version or whether the tree is between two tagged versions.
      if [[ "${S2I_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)([.-].*)?$ ]]; then
        S2I_GIT_MAJOR=${BASH_REMATCH[1]}
        S2I_GIT_MINOR=${BASH_REMATCH[2]}
        # A "+" suffix on minor marks a between-tags (inexact) version.
        if [[ -n "${BASH_REMATCH[3]}" ]]; then
          S2I_GIT_MINOR+="+"
        fi
      fi
    fi
  fi
}
# Saves the environment flags to $1, in a format that
# s2i::build::get_version_vars can later `source` back in.
s2i::build::save_version_vars() {
  local version_file=${1-}
  [[ -n ${version_file} ]] || {
    echo "!!! Internal error.  No file specified in s2i::build::save_version_vars"
    return 1
  }

  cat <<EOF >"${version_file}"
S2I_GIT_COMMIT='${S2I_GIT_COMMIT-}'
S2I_GIT_TREE_STATE='${S2I_GIT_TREE_STATE-}'
S2I_GIT_VERSION='${S2I_GIT_VERSION-}'
S2I_GIT_MAJOR='${S2I_GIT_MAJOR-}'
S2I_GIT_MINOR='${S2I_GIT_MINOR-}'
EOF
}
# golang 1.5 wants `-X key=val`, but golang 1.4- REQUIRES `-X key val`
# NOTE(review): the grep pattern 'go1.4' would also match e.g. go1.40 --
# harmless at the time this was written, but worth confirming if ever
# revisited.
s2i::build::ldflag() {
  local key=${1}
  local val=${2}

  GO_VERSION=($(go version))
  if [[ -n $(echo "${GO_VERSION[2]}" | grep -E 'go1.4') ]]; then
    echo "-X ${S2I_GO_PACKAGE}/pkg/version.${key} ${val}"
  else
    echo "-X ${S2I_GO_PACKAGE}/pkg/version.${key}=${val}"
  fi
}
# s2i::build::ldflags calculates the -ldflags argument for building STI,
# embedding the git version/commit information into the binary.
s2i::build::ldflags() {
  (
    # Run this in a subshell to prevent settings/variables from leaking.
    set -o errexit
    set -o nounset
    set -o pipefail

    cd "${S2I_ROOT}"

    s2i::build::get_version_vars

    declare -a ldflags=()
    ldflags+=($(s2i::build::ldflag "majorFromGit" "${S2I_GIT_MAJOR}"))
    ldflags+=($(s2i::build::ldflag "minorFromGit" "${S2I_GIT_MINOR}"))
    ldflags+=($(s2i::build::ldflag "versionFromGit" "${S2I_GIT_VERSION}"))
    ldflags+=($(s2i::build::ldflag "commitFromGit" "${S2I_GIT_COMMIT}"))

    # The -ldflags parameter takes a single string, so join the output.
    echo "${ldflags[*]-}"
  )
}
|
package fwcd.fructose.chiffre.huffman;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.UncheckedIOException;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import fwcd.fructose.Distribution;
import fwcd.fructose.Distribution.Normalizer;
import fwcd.fructose.chiffre.StringEncoding;
import fwcd.fructose.exception.Rethrow;
/**
 * An implementation of the
 * lossless Huffman compression algorithm.
 */
public class Huffman implements StringEncoding {
    /**
     * Encodes the string as a serialized Huffman tree followed by the
     * bit sequence for each character.
     */
    @Override
    public byte[] encode(String data) {
        HuffmanTree tree = generateTree(data);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();

        try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
            oos.writeObject(tree);
            for (char c : data.toCharArray()) {
                for (boolean bit : tree.encode(c)) {
                    oos.writeBoolean(bit);
                }
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }

        return baos.toByteArray();
    }

    /**
     * Builds the Huffman tree by repeatedly merging the two least probable
     * subtrees until a single tree remains.
     */
    public HuffmanTree generateTree(String data) {
        Queue<HuffmanTree> queue = toPriorityQueue(distributionOf(data));

        while (queue.size() > 1) {
            // Pick the two items with the lowest probability (thus highest priority)
            HuffmanTree a = queue.poll();
            HuffmanTree b = queue.poll();
            // Probabilities of the child trees are added together
            queue.offer(new HuffmanTree(a, b));
        }

        return queue.poll();
    }

    /** Decodes data produced by {@link #encode(String)}. */
    @Override
    public String decode(byte[] data) {
        ByteArrayInputStream bais = new ByteArrayInputStream(data);
        StringBuilder s = new StringBuilder();

        try (ObjectInputStream ois = new ObjectInputStream(bais)) {
            HuffmanTree tree = (HuffmanTree) ois.readObject();
            // NOTE(review): available() reports buffered bytes, which for a
            // ByteArrayInputStream-backed stream covers the remaining data;
            // it would not be a reliable end-of-stream test for other stream
            // types.
            while (ois.available() > 0) {
                s.append(tree.decode(() -> {
                    try {
                        return ois.readBoolean();
                    } catch (IOException e) { throw new UncheckedIOException(e); }
                }));
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        } catch (ClassNotFoundException e) {
            throw new Rethrow(e);
        }

        return s.toString();
    }

    /**
     * Relative character frequencies of the input (normalized by the
     * Distribution itself).
     */
    private Distribution<Character> distributionOf(String input) {
        Map<Character, Double> dist = new HashMap<>();

        for (char c : input.toCharArray()) {
            // Bug fix: the original seeded first occurrences with 0 instead
            // of 1, making every count off by one -- characters occurring
            // exactly once carried zero weight before normalization.
            dist.merge(c, 1D, Double::sum);
        }

        return new Distribution<>(dist, Normalizer.NORMALIZE);
    }

    private Queue<HuffmanTree> toPriorityQueue(Distribution<Character> dist) {
        Queue<HuffmanTree> queue = new PriorityQueue<>();
        dist.forEach((c, probability) -> queue.add(new HuffmanTree(c, probability)));
        return queue;
    }
}
|
<filename>tests/dummy/app/router.js
import AddonDocsRouter, { docsRoute } from "ember-cli-addon-docs/router";
import config from "./config/environment";
// Docs-site router built on ember-cli-addon-docs; docsRoute mounts the
// documentation pages, and a catch-all handles unknown URLs.
const Router = AddonDocsRouter.extend({
    location: config.locationType,
    rootURL: config.rootURL
});

Router.map(function() {
    docsRoute(this, function() {
        this.route("quickstart");
        this.route("customization");
        this.route("treeshaking");
        // Usage-pattern guides
        this.route("patterns", function() {
            this.route("native");
            this.route("custom-options");
            this.route("time-picker");
            this.route("multiple-selections");
            this.route("autocomplete");
            this.route("action-menu");
            this.route("standalone-select");
        });
        // Per-component API docs
        this.route("components", function() {
            this.route("select-native");
            this.route("select-listbox");
            this.route("select-combobox");
            this.route("select-menu");
        });
    });
    // Catch-all for unknown URLs
    this.route("not-found", { path: "/*path" });
});

export default Router;
|
<reponame>Clymsw/xHVI-2020
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 17:28:39 2020
@author: stockwilliamscfw
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
import plotFunctions as plots
import pickle
import numpy as np
# from aerofoilSvg import Aerofoil
import paretoFrontND as pf
plt.style.use('ggplot')
rcParams['font.sans-serif'] = "Segoe UI"
rcParams['font.family'] = "sans-serif"

plot_size = 10.0
# Number of initial Latin-hypercube samples; skipped when slicing histories.
num_lhs = 13*8
max_iterations = 800
# commercial_foils_to_plot = [0,5]
commercial_foils_to_plot = np.arange(0,13)

# filePathOut = '.\\opt{}_results.pkl'.format(RUN)

# Per-run optimiser histories: decision vectors (x_*), objectives (y_*) and
# timings (t_*) for each acquisition function.
x_hypi = []
y_hypi = []
t_hypi = []
x_xhvi = []
y_xhvi = []
t_xhvi = []
x_ehvi = []
y_ehvi = []
t_ehvi = []
# Load five optimisation runs per acquisition function, keeping only the
# post-LHS iterations.
for run in range(5):
    filePath = '.\\HypI\\Run{}\\aero_fitness_struc_TH18_xhvi_final.pkl'.format(run+1)
    with open(filePath, 'rb') as f:
        # HypI/xHVI pickles hold 8 fields; only x, y and t are used here.
        temp_x, temp_y,_,_,_,_,_,temp_t = pickle.load(f)
    x_hypi.append(temp_x[num_lhs:max_iterations,:])
    y_hypi.append(temp_y[num_lhs:max_iterations,:])
    t_hypi.append(temp_t[:max_iterations-num_lhs+1])

    filePath = '.\\xHVI\\Run{}\\aero_fitness_struc_TH18_xhvi_final.pkl'.format(run+1)
    with open(filePath, 'rb') as f:
        temp_x, temp_y,_,_,_,_,_,temp_t = pickle.load(f)
    x_xhvi.append(temp_x[num_lhs:max_iterations,:])
    y_xhvi.append(temp_y[num_lhs:max_iterations,:])
    t_xhvi.append(temp_t[:max_iterations-num_lhs+1])

    # NOTE: the EHVI pickle has one field fewer (7) than the HypI/xHVI ones.
    filePath = '.\\EHVI\\sim-{}\\aero_fitness_struc_TH18_xhvi_final.pkl'.format(run+6)
    with open(filePath, 'rb') as f:
        temp_x, temp_y,_,_,_,_,temp_t = pickle.load(f)
    x_ehvi.append(temp_x[num_lhs:max_iterations,:])
    y_ehvi.append(temp_y[num_lhs:max_iterations,:])
    t_ehvi.append(temp_t[:max_iterations-num_lhs+1])

# Reference (commercial) aerofoil names and objective values.
filePathRef = 'existingfoils_TH18.pkl'
with open(filePathRef, 'rb') as f:
    ref_names,y_ref = pickle.load(f)
# fig = plt.figure(figsize=[plot_size * 1.62, plot_size * 1.0], tight_layout=True)
# plot1 = plt.plot(-ehvi_y_toplot[ehvi_y_toplot[:,0]<0,0], -ehvi_y_toplot[ehvi_y_toplot[:,1]<0,1],
# '^k')
# plt.plot(-hypi_y_toplot[hypi_y_toplot[:,0]<0,0], -hypi_y_toplot[hypi_y_toplot[:,1]<0,1],
# 'Hg')
# plt.plot(-xhvi_y_toplot[xhvi_y_toplot[:,0]<0,0], -xhvi_y_toplot[xhvi_y_toplot[:,0]<0,1],
# 'vb')
# plt.plot(y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
# 'or')
# plt.xlabel('Integrated lift:drag ratio', fontsize=plot_size*3.0)
# plt.ylabel('Flapwise bending stiffness ($Nm^2$)', fontsize=plot_size*3.0)
# plt.legend(["EHVI Pareto Front".format(max_iterations),
# "HypI Pareto Front".format(max_iterations),
# "xHVI Pareto Front".format(max_iterations),
# 'Reference aerofoils'],
# loc='center left', fontsize=plot_size*2.5)
# [hypi_y, xhvi_y, ehvi_y]
# plt.xlim(0, 2250)
# plt.ylim(0, 8e-4)
# for tick in plot1[0].axes.get_xticklabels():
# tick.set_fontsize(plot_size*2.5)
# for tick in plot1[0].axes.get_yticklabels():
# tick.set_fontsize(plot_size*2.5)
# texts = []
# for i in range(len(commercial_foils_to_plot)):
# name = ref_names[commercial_foils_to_plot[i]][:-4]
# texts.append(
# plt.text(y_ref[commercial_foils_to_plot[i],0],# + 20,
# y_ref[commercial_foils_to_plot[i],1],# - 0.000005,
# name, fontsize=plot_size*2))
# adjust_text(texts, arrowprops=dict(arrowstyle='-', color='red'))
# plt.savefig("aerofoil_pareto_front_R{0}_N{1}.eps".format(RUN, max_iterations), facecolor=None, edgecolor=None)
# plt.savefig("aerofoil_pareto_front_R{0}_N{1}.png".format(RUN, max_iterations), facecolor=None, edgecolor=None)
# best_aerofoil_clcd_index_ehvi = np.argmin(ehvi_y[:,0])
# best_aerofoil_clcd_index_ehvi = np.argmin(ehvi_y[:,0])
# best_aerofoil_clcd_index_ehvi = np.argmin(ehvi_y[:,0])
# aerofoil_thickness_ratio = 0.18
# def convert_to_aerofoil(ind):
# return Aerofoil(aerofoil_thickness_ratio,
# ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6],
# ind[7], ind[8], ind[9], ind[10], ind[11], ind[12])
# best_aerofoil_clcd_ehvi = convert_to_aerofoil(ehvi_x[best_aerofoil_clcd_index_ehvi,:])
# best_aerofoil_clcd_ehvi.write_to_file('aerofoil_ehvi_{}.svg'.format(best_aerofoil_clcd_index_ehvi))
# with open(filePathOut, 'wb') as f:
# pickle.dump([hypi_y, xhvi_y, ehvi_y], f)
# NOTE(review): the d1ClCd/d1Stiffness linspaces and their meshgrid that were
# previously built here were dead code -- the EAF routines below return their
# own coordinate grids and nothing else read those variables -- so the
# computation has been removed.
print('Calculating HypI EAF')
d2ClCd_hypi, d2Stiffness_hypi, d2Eaf_hypi = pf.calculateEmpiricalAttainmentFunction(
    y_hypi, np.array([[-2250., 0.], [-1e-3, 0.]]))
print('Calculating xHVI EAF')
d2ClCd_xhvi, d2Stiffness_xhvi, d2Eaf_xhvi = pf.calculateEmpiricalAttainmentFunction(
    y_xhvi, np.array([[-2250., 0.], [-1e-3, 0.]]))
print('Calculating EHVI EAF')
d2ClCd_ehvi, d2Stiffness_ehvi, d2Eaf_ehvi = pf.calculateEmpiricalAttainmentFunction(
    y_ehvi, np.array([[-2250., 0.], [-1e-3, 0.]]))
# %% Plot EAFs
print('Plotting...')
fig = plt.figure(figsize=[plot_size * 1., plot_size * 1.62], tight_layout=True)
gs = gridspec.GridSpec(ncols=1, nrows=3, figure=fig, height_ratios=[1,1,1], hspace=0.25, wspace=0.2)

# Reference aerofoil names; [:-4] strips a 4-character file extension --
# presumably '.dat'-style, confirm against the pickle's contents.
names = []
for i in range(len(commercial_foils_to_plot)):
    names.append(ref_names[commercial_foils_to_plot[i]][:-4])

# One panel per acquisition function; each panel labels a different third of
# the reference foils so the text labels do not overlap.
ax = fig.add_subplot(gs[0, 0])
names_to_plot = ['' for x in range(len(names))]
names_to_plot[::3] = names[::3]
plots.plot_contour_with_points(-d2ClCd_hypi, -d2Stiffness_hypi, d2Eaf_hypi,
    [0,0.5,0.999], 'black', ['-','--','-'],
    y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
    names_to_plot,
    sYLabel='Flapwise bending\n stiffness ($Nm^2$)',
    sTitle='HypI')

ax = fig.add_subplot(gs[1, 0])
names_to_plot = ['' for x in range(len(names))]
names_to_plot[1::3] = names[1::3]
plots.plot_contour_with_points(-d2ClCd_xhvi, -d2Stiffness_xhvi, d2Eaf_xhvi,
    [0,0.5,0.999], 'black', ['-','--','-'],
    y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
    names_to_plot,
    sYLabel='Flapwise bending\n stiffness ($Nm^2$)',
    sTitle='xHVI')

ax = fig.add_subplot(gs[2, 0])
names_to_plot = ['' for x in range(len(names))]
names_to_plot[2::3] = names[2::3]
plots.plot_contour_with_points(-d2ClCd_ehvi, -d2Stiffness_ehvi, d2Eaf_ehvi,
    [0,0.5,0.999], 'black', ['-','--','-'],
    y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
    names_to_plot,
    sXLabel='Integrated lift:drag ratio',
    sYLabel='Flapwise bending\n stiffness ($Nm^2$)',
    sTitle='EHVI')

# Save in all three formats used by the paper/figures pipeline.
plt.savefig("aerofoil_eafs_N{0}.eps".format(max_iterations), facecolor=None, edgecolor=None)
plt.savefig("aerofoil_eafs_N{0}.png".format(max_iterations), facecolor=None, edgecolor=None)
plt.savefig("aerofoil_eafs_N{0}.svg".format(max_iterations), facecolor=None, edgecolor=None)
# %% Plot EAF difference from EHVI
fig = plt.figure(figsize=[plot_size * 1., plot_size * 1.62], tight_layout=True)
gs = gridspec.GridSpec(ncols=1, nrows=2, figure=fig, height_ratios=[1,1], hspace=0.25, wspace=0.2)

# EAF difference in percentage points; differences within +/-10 points are
# masked out so only meaningful gaps are coloured.
masked_data = (d2Eaf_ehvi - d2Eaf_hypi)*100.
masked_data = np.ma.masked_where((masked_data < 10.) * (masked_data > -10.), masked_data)
ax = fig.add_subplot(gs[0, 0])
names_to_plot = ['' for x in range(len(names))]
names_to_plot[::2] = names[::2]
cb = plots.plot_contours_with_points(-d2ClCd_ehvi, -d2Stiffness_ehvi, masked_data,
    np.arange(-100, 101, 20), 'RdYlGn_r',
    -d2ClCd_hypi, -d2Stiffness_hypi, d2Eaf_hypi,
    [0,0.5,0.999], 'black', ['-','--','-'],
    y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
    names_to_plot,
    sXLabel='Integrated lift:drag ratio',
    sYLabel='Flapwise bending\n stiffness ($Nm^2$)',
    sTitle='HypI',
    sColorbarLabel='Empirical Attainment Front Difference\n (compared with EHVI)')

masked_data = (d2Eaf_ehvi - d2Eaf_xhvi)*100.
masked_data = np.ma.masked_where((masked_data < 10.) * (masked_data > -10.), masked_data)
ax = fig.add_subplot(gs[1, 0])
names_to_plot = ['' for x in range(len(names))]
names_to_plot[1::2] = names[1::2]
plots.plot_contours_with_points(-d2ClCd_ehvi, -d2Stiffness_ehvi, masked_data,
    np.arange(-100, 101, 20), 'RdYlGn_r',
    -d2ClCd_xhvi, -d2Stiffness_xhvi, d2Eaf_xhvi,
    [0,0.5,0.999], 'black', ['-','--','-'],
    y_ref[commercial_foils_to_plot,0], y_ref[commercial_foils_to_plot,1],
    names_to_plot,
    sXLabel='Integrated lift:drag ratio',
    sYLabel='Flapwise bending\n stiffness ($Nm^2$)',
    sTitle='xHVI',
    sColorbarLabel='Empirical Attainment Front Difference\n (compared with EHVI)')

plt.savefig("aerofoil_eaf_diffs_N{0}.eps".format(max_iterations), facecolor=None, edgecolor=None)
plt.savefig("aerofoil_eaf_diffs_N{0}.png".format(max_iterations), facecolor=None, edgecolor=None)
plt.savefig("aerofoil_eaf_diffs_N{0}.svg".format(max_iterations), facecolor=None, edgecolor=None)
|
# Regression guard: a variable set/exported during an earlier test run must
# not leak into this one.
@test "must not see variable from first run" {
    [[ -z "$POTENTIALLY_LEAKING_VARIABLE" ]]
}
|
/*
Copyright 2017 yangchong211(github.com/yangchong211)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.ycbjie.webviewlib.base;
import android.content.Context;
import android.graphics.Bitmap;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Message;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.text.TextUtils;
import android.view.KeyEvent;
import com.tencent.smtt.export.external.interfaces.ClientCertRequest;
import com.tencent.smtt.export.external.interfaces.HttpAuthHandler;
import com.tencent.smtt.export.external.interfaces.SslError;
import com.tencent.smtt.export.external.interfaces.SslErrorHandler;
import com.tencent.smtt.export.external.interfaces.WebResourceError;
import com.tencent.smtt.export.external.interfaces.WebResourceRequest;
import com.tencent.smtt.export.external.interfaces.WebResourceResponse;
import com.tencent.smtt.sdk.WebView;
import com.tencent.smtt.sdk.WebViewClient;
import com.ycbjie.webviewlib.utils.X5LogUtils;
import com.ycbjie.webviewlib.utils.X5WebUtils;
import com.ycbjie.webviewlib.helper.WebSchemeIntent;
import com.ycbjie.webviewlib.inter.InterWebListener;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.Stack;
/**
* <pre>
* @author yangchong
* blog : https://github.com/yangchong211
* time : 2019/9/10
* desc : 自定义x5的WebViewClient
* revise: demo地址:https://github.com/yangchong211/YCWebView
*
* 作用:主要辅助 WebView 处理JavaScript 的对话框、网站 Logo、网站 title、load 进度等处理
* demo地址:https://github.com/yangchong211/YCWebView
* </pre>
*/
public class X5WebViewClient extends WebViewClient {

    private InterWebListener webListener;
    private WebView webView;
    private Context context;
    /** Whether the current page has finished loading. */
    private boolean isLoadFinish = false;
    /**
     * Time of the last redirect-triggered reload.
     * Used to avoid reload loops caused by repeated redirects.
     */
    private long mLastRedirectTime = 0;
    /** Minimum interval (ms) between redirect-triggered reloads. */
    private static final long DEFAULT_REDIRECT_INTERVAL = 3000;
    /** Stack of visited page URLs, used for custom back navigation. */
    private final Stack<String> mUrlStack = new Stack<>();
    /** Whether a page load is currently in progress. */
    private boolean mIsLoading = false;
    /** URL recorded just before a redirect happened. */
    private String mUrlBeforeRedirect;

    /**
     * @return whether the page has finished loading
     */
    public boolean isLoadFinish() {
        return isLoadFinish;
    }

    /**
     * Sets the listener notified of state changes such as error pages and
     * progress updates.
     * @param listener listener
     */
    public void setWebListener(InterWebListener listener){
        this.webListener = listener;
    }

    /**
     * Constructor.
     * @param webView the WebView this client drives
     * @param context context
     */
    public X5WebViewClient(WebView webView ,Context context) {
        this.context = context;
        this.webView = webView;
        //Map JS objects to Java objects (currently disabled):
        //webView.addJavascriptInterface(new ImageJavascriptInterface(context), "imagelistener");
    }

    /**
     * Records a non-redirect URL, avoiding pushing the same URL twice in a row.
     * NOTE(review): only the pending mUrlBeforeRedirect is pushed here; the
     * 'url' argument itself is never pushed onto the stack — confirm this
     * matches the intended back-stack semantics.
     * @param url url
     */
    private void recordUrl(String url) {
        //Only act when the new url differs from the current stack top.
        if (!TextUtils.isEmpty(url) && !url.equals(getUrl())) {
            if (!TextUtils.isEmpty(mUrlBeforeRedirect)) {
                mUrlStack.push(mUrlBeforeRedirect);
                mUrlBeforeRedirect = null;
            }
        }
    }

    /**
     * Returns the URL of the page we last stayed on.
     * @return url, or null when the stack is empty
     */
    @Nullable
    public String getUrl() {
        //peek: look at the top of the stack without removing it.
        return mUrlStack.size() > 0 ? mUrlStack.peek() : null;
    }

    /**
     * Removes and returns the top URL of the stack.
     * @return the removed url, or null when the stack is empty
     */
    String popUrl() {
        return mUrlStack.size() > 0 ? mUrlStack.pop() : null;
    }

    /**
     * @return true when at least two URLs are stacked, i.e. going back is possible
     */
    public boolean pageCanGoBack() {
        return mUrlStack.size() >= 2;
    }

    /**
     * Navigates back using the URL stack.
     * @param webView webView
     * @return true when a back navigation was started
     */
    public final boolean pageGoBack(@NonNull WebView webView) {
        if (pageCanGoBack()) {
            //Get the url of the page we last stayed on.
            final String url = popBackUrl();
            if (!TextUtils.isEmpty(url)) {
                webView.loadUrl(url);
                return true;
            }
        }
        return false;
    }

    /**
     * Returns the URL of the previous page, discarding the current one.
     * @return null when there is no previous page
     */
    @Nullable
    public final String popBackUrl() {
        if (mUrlStack.size() >= 2) {
            //pop current page url
            mUrlStack.pop();
            //pop and return the previous page url
            return mUrlStack.pop();
        }
        return null;
    }

    /**
     * Resolves a redirect loop by reloading at most once per
     * DEFAULT_REDIRECT_INTERVAL.
     * @param view webView
     */
    private void resolveRedirect(WebView view) {
        final long now = System.currentTimeMillis();
        //mLastRedirectTime is the time of the previous redirect-reload.
        if (now - mLastRedirectTime > DEFAULT_REDIRECT_INTERVAL) {
            mLastRedirectTime = System.currentTimeMillis();
            view.reload();
        }
    }

    /**
     * Intercepts URL loading (legacy callback). Returning true means this
     * client handled the URL itself; false lets the WebView (or an external
     * browser) handle it.
     * @param view view
     * @param url url
     * @return whether the url was handled here
     */
    @Override
    public boolean shouldOverrideUrlLoading(WebView view, String url) {
        //If the hosting activity is gone, do not run any network/js work.
        boolean activityAlive = X5WebUtils.isActivityAlive(context);
        if (!activityAlive){
            return false;
        }
        if (TextUtils.isEmpty(url)) {
            return false;
        }
        try {
            url = URLDecoder.decode(url, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
        final Uri uri = Uri.parse(url);
        //Custom-scheme support: handle silently when the scheme allows it.
        if (uri!=null && uri.getScheme()!=null && WebSchemeIntent.isSilentType(uri.getScheme())) {
            return WebSchemeIntent.handleSilently(context, uri);
        }
        WebView.HitTestResult hitTestResult = view.getHitTestResult();
        if (hitTestResult == null) {
            return false;
        }
        //UNKNOWN_TYPE usually corresponds to a redirect rather than a user
        //click on a link/image/phone/email element.
        if (hitTestResult.getType() == WebView.HitTestResult.UNKNOWN_TYPE) {
            return false;
        }
        //Try launching a matching installed app for the scheme.
        boolean handleAlive = WebSchemeIntent.handleAlive(context, uri);
        if (handleAlive){
            return true;
        }
        return super.shouldOverrideUrlLoading(view, url);
    }

    /**
     * shouldOverrideUrlLoading variant used on API >= 24.
     * @param view view
     * @param request request (added in API 21) wrapping url, method, headers,
     *                main-frame flag, user-gesture flag, redirect flag
     * @return whether the url was handled here
     */
    @Override
    public boolean shouldOverrideUrlLoading(WebView view, WebResourceRequest request) {
        //If the hosting activity is gone, do not run any network/js work.
        boolean activityAlive = X5WebUtils.isActivityAlive(context);
        if (!activityAlive){
            return false;
        }
        String url = request.getUrl().toString();
        if (TextUtils.isEmpty(url)) {
            return false;
        }
        try {
            url = URLDecoder.decode(url, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
        final Uri uri = Uri.parse(url);
        //Custom-scheme support: handle silently when the scheme allows it.
        if (uri!=null && uri.getScheme()!=null && WebSchemeIntent.isSilentType(uri.getScheme())) {
            return WebSchemeIntent.handleSilently(context, uri);
        }
        WebView.HitTestResult hitTestResult = view.getHitTestResult();
        if (hitTestResult == null) {
            return false;
        }
        //UNKNOWN_TYPE usually corresponds to a redirect rather than a user
        //click on a link/image/phone/email element.
        if (hitTestResult.getType() == WebView.HitTestResult.UNKNOWN_TYPE) {
            return false;
        }
        //Try launching a matching installed app for the scheme.
        boolean handleAlive = WebSchemeIntent.handleAlive(context, uri);
        if (handleAlive){
            return true;
        }
        return super.shouldOverrideUrlLoading(view, request);
    }

    /**
     * Called when a page starts loading; a loading UI can be shown here.
     * @param webView view
     * @param url url
     * @param bitmap favicon
     */
    @Override
    public void onPageStarted(WebView webView, String url, Bitmap bitmap) {
        super.onPageStarted(webView, url, bitmap);
        X5LogUtils.i("-------onPageStarted-------"+url);
        if (!X5WebUtils.isConnected(webView.getContext()) && webListener!=null) {
            //No network: show the error view.
            webListener.showErrorView(X5WebUtils.ErrorMode.NO_NET);
        }
        isLoadFinish = false;
        //A start while a load is already in progress means a redirect:
        //remember the url we are being redirected away from.
        if (mIsLoading && mUrlStack.size() > 0) {
            mUrlBeforeRedirect = mUrlStack.pop();
        }
        recordUrl(url);
        mIsLoading = true;
    }

    /**
     * Called when the page has finished loading.
     * @param view view
     * @param url url
     */
    @Override
    public void onPageFinished(WebView view, String url) {
        X5LogUtils.i("-------onPageFinished-------"+url);
        if (mIsLoading) {
            mIsLoading = false;
        }
        //NOTE(review): the checks below use the 'webView' field from the
        //constructor rather than the 'view' parameter — confirm both always
        //refer to the same WebView instance.
        if (!X5WebUtils.isConnected(webView.getContext()) && webListener!=null) {
            //Hide the progress bar.
            webListener.hindProgressBar();
            //No network: show the error view.
            webListener.showErrorView(X5WebUtils.ErrorMode.NO_NET);
        }
        super.onPageFinished(view, url);
        //Image loading may have been deferred during page load; start it now
        //that the page has finished.
        if(!webView.getSettings().getLoadsImagesAutomatically()) {
            webView.getSettings().setLoadsImagesAutomatically(true);
        }
        //After the html has loaded, hook up the image-click JS bridge.
        //addImageClickListener();
        addImageArrayClickListener(webView);
        isLoadFinish = true;
    }

    /**
     * Legacy (API < 23) error callback. Only called for unrecoverable errors
     * such as no connection, a timeout, or an unresolvable host; lets the app
     * show its own error page instead of the ugly browser default.
     * @param view view
     * @param errorCode error code
     * @param description description
     * @param failingUrl the url that failed to load
     */
    @Override
    public void onReceivedError(WebView view, int errorCode, String description, String failingUrl) {
        super.onReceivedError(view, errorCode, description, failingUrl);
        X5LogUtils.i("-------onReceivedError-------"+failingUrl);
        if (Build.VERSION.SDK_INT < 23) {
            //Redirect loop: reload (rate limited) instead of showing an error.
            if (errorCode == ERROR_REDIRECT_LOOP) {
                resolveRedirect(view);
                return;
            }
        }
        if (webListener!=null){
            if (errorCode == ERROR_TIMEOUT){
                //Connection timed out.
                webListener.showErrorView(X5WebUtils.ErrorMode.TIME_OUT);
            } else if (errorCode == ERROR_CONNECT){
                //No network.
                webListener.showErrorView(X5WebUtils.ErrorMode.NO_NET);
            } else {
                //BUGFIX: this branch was a bare block ('} {') and therefore
                //always ran, overwriting the TIME_OUT/NO_NET views above.
                webListener.showErrorView(X5WebUtils.ErrorMode.RECEIVED_ERROR);
            }
        }
    }

    /**
     * Called when the page scale changes; scales the page back when an
     * abnormal zoom is detected (e.g. after returning from fullscreen video).
     * @param view view
     * @param oldScale previous scale
     * @param newScale new scale
     */
    @Override
    public void onScaleChanged(WebView view, float oldScale, float newScale) {
        super.onScaleChanged(view, oldScale, newScale);
        X5LogUtils.i("-------onScaleChanged-------"+newScale);
        if (newScale - oldScale > 7) {
            //Abnormal zoom-in: scale back to the previous size.
            view.setInitialScale((int) (oldScale / newScale * 100));
        }
    }

    /**
     * API >= 23 error callback. May be invoked for any resource, not only the
     * main page, so the work done here is kept minimal.
     * @param view view
     * @param request the failing resource request (added in API 21)
     * @param error error code + description (added in API 23)
     */
    @Override
    public void onReceivedError(WebView view, WebResourceRequest request, WebResourceError error) {
        super.onReceivedError(view, request, error);
        //BUGFIX: the original dereferenced 'error' before its null check,
        //which would NPE on a null error. Guard defensively instead.
        if (error == null) {
            if (webListener != null) {
                webListener.showErrorView(X5WebUtils.ErrorMode.RECEIVED_ERROR);
            }
            return;
        }
        X5LogUtils.i("-------onReceivedError-------"+error.getDescription().toString());
        if (Build.VERSION.SDK_INT >= 23) {
            //Redirect loop: reload (rate limited) instead of showing an error.
            if (error.getErrorCode() == ERROR_REDIRECT_LOOP) {
                resolveRedirect(view);
                return;
            }
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            X5LogUtils.d("服务器异常"+error.getDescription().toString());
        }
        //When loading fails a local error page could be shown instead:
        //mWebView.loadUrl("file:///android_asset/errorpage/error.html");
        int errorCode = error.getErrorCode();
        if (webListener!=null){
            if (errorCode == ERROR_TIMEOUT){
                //Connection timed out.
                webListener.showErrorView(X5WebUtils.ErrorMode.TIME_OUT);
            } else if (errorCode == ERROR_CONNECT){
                //No network.
                webListener.showErrorView(X5WebUtils.ErrorMode.NO_NET);
            } else {
                //BUGFIX: was a bare block ('} {') that always ran; see the
                //legacy onReceivedError above for the same fix.
                webListener.showErrorView(X5WebUtils.ErrorMode.RECEIVED_ERROR);
            }
        }
    }

    /**
     * Notifies that an HTTP error (status >= 400) was received while loading
     * a resource; 404 and 500 are mapped to dedicated error views.
     * @param view view
     * @param request the failing request (added in API 21)
     * @param errorResponse response metadata: status code, reason phrase,
     *                      headers, MIME type
     */
    @Override
    public void onReceivedHttpError(WebView view, WebResourceRequest request,
                                    WebResourceResponse errorResponse) {
        super.onReceivedHttpError(view, request, errorResponse);
        int statusCode = errorResponse.getStatusCode();
        String reasonPhrase = errorResponse.getReasonPhrase();
        X5LogUtils.i("-------onReceivedHttpError-------"+ statusCode + "-------"+reasonPhrase);
        if (statusCode == 404) {
            //Avoid the system 404 page; show the app error view instead.
            if (webListener!=null){
                webListener.showErrorView(X5WebUtils.ErrorMode.STATE_404);
            }
        } else if (statusCode == 500){
            //Avoid the default error page for server errors.
            if (webListener!=null){
                webListener.showErrorView(X5WebUtils.ErrorMode.STATE_500);
            }
        } else {
            if (webListener!=null){
                webListener.showErrorView(X5WebUtils.ErrorMode.RECEIVED_ERROR);
            }
        }
    }

    /**
     * Notifies that a user login request was handled automatically.
     * @param view view
     * @param realm realm
     * @param account account
     * @param args args
     */
    @Override
    public void onReceivedLoginRequest(WebView view, String realm, String account, String args) {
        super.onReceivedLoginRequest(view, realm, account, args);
        X5LogUtils.i("-------onReceivedLoginRequest-------"+ args);
    }

    /**
     * Called when an SSL error occurs while loading a resource. To keep
     * third-party pages rendering despite certificate problems this
     * implementation proceeds past the error (after surfacing it via the
     * listener).
     * @param view view
     * @param handler request handler offering proceed()/cancel()
     * @param error the SSL error
     */
    @Override
    public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {
        super.onReceivedSslError(view, handler, error);
        X5LogUtils.i("-------onReceivedSslError-------"+ error.getUrl());
        if (error!=null){
            String url = error.getUrl();
            if (webListener!=null){
                webListener.showErrorView(X5WebUtils.ErrorMode.SSL_ERROR);
            }
            X5LogUtils.i("onReceivedSslError----异常url----"+url);
        }
        //Ignore the certificate problem so https pages still load.
        if (handler!=null){
            //proceed: continue the connection despite the ssl error
            handler.proceed();
            // handler.cancel();             //default: suspend the connection
            // handler.handleMessage(null);  //custom handling
        }
    }

    /**
     * Called once for every resource (e.g. each image) loaded by the page.
     * @param webView view
     * @param s resource url
     */
    @Override
    public void onLoadResource(WebView webView, String s) {
        super.onLoadResource(webView, s);
        X5LogUtils.i("-------onLoadResource-------"+ s);
    }

    /**
     * Added in API 23; main-frame navigations only. Signals that the old page
     * content will no longer be drawn, so stale WebView content can be safely
     * recycled. Fired early in document load; resources (css, images) may not
     * be available yet.
     * @param webView view
     * @param s url
     */
    @Override
    public void onPageCommitVisible(WebView webView, String s) {
        super.onPageCommitVisible(webView, s);
    }

    /**
     * Deprecated since API 21; called off the UI thread. Intercepts a resource
     * request; returning null lets the WebView load the resource itself.
     * Note: below API 21, AJAX requests go through onLoadResource and cannot
     * be intercepted here.
     * @param webView view
     * @param s resource url
     */
    @Override
    public WebResourceResponse shouldInterceptRequest(WebView webView, String s) {
        return super.shouldInterceptRequest(webView, s);
    }

    /**
     * Added in API 21; called off the UI thread. Intercepts a resource request
     * and may return replacement data; null lets the WebView load it.
     * @param webView view
     * @param webResourceRequest the resource request
     * @return replacement response, or null
     */
    @Override
    public WebResourceResponse shouldInterceptRequest(WebView webView, WebResourceRequest webResourceRequest) {
        return super.shouldInterceptRequest(webView, webResourceRequest);
    }

    /**
     * X5 variant of shouldInterceptRequest carrying an extra Bundle.
     * @param webView view
     * @param webResourceRequest the resource request
     * @param bundle bundle
     * @return replacement response, or null
     */
    @Override
    public WebResourceResponse shouldInterceptRequest(WebView webView, WebResourceRequest webResourceRequest, Bundle bundle) {
        return super.shouldInterceptRequest(webView, webResourceRequest, bundle);
    }

    /**
     * Called when the page has been redirected too many times.
     * @param webView view
     * @param message cancel message
     * @param message1 continue message
     */
    @Override
    public void onTooManyRedirects(WebView webView, Message message, Message message1) {
        super.onTooManyRedirects(webView, message, message1);
    }

    /**
     * Asks whether a form should be resubmitted; the default is not to resend.
     * @param webView view
     * @param message don't-resend message
     * @param message1 resend message
     */
    @Override
    public void onFormResubmission(WebView webView, Message message, Message message1) {
        super.onFormResubmission(webView, message, message1);
    }

    /**
     * Notifies that the url can be stored in the visited-links database.
     * Called only once per page load; forward/back navigation does not
     * trigger it again.
     * @param webView view
     * @param s url
     * @param b whether this is a reload
     */
    @Override
    public void doUpdateVisitedHistory(WebView webView, String s, boolean b) {
        super.doUpdateVisitedHistory(webView, s, b);
    }

    /**
     * Added in API 21; called on the UI thread. Handles an SSL client
     * certificate request (proceed/cancel/ignore; default cancels). The
     * response is cached per "host:port", so this is not called again for the
     * same pair. Typically KeyChain.choosePrivateKeyAlias is launched so the
     * user can pick a key.
     * @param webView view
     * @param clientCertRequest the certificate request
     */
    @Override
    public void onReceivedClientCertRequest(WebView webView, ClientCertRequest clientCertRequest) {
        super.onReceivedClientCertRequest(webView, clientCertRequest);
    }

    /**
     * Handles an HTTP authentication request; the default cancels it.
     * @param webView view
     * @param httpAuthHandler handler offering proceed()/cancel()
     * @param s host
     * @param s1 realm
     */
    @Override
    public void onReceivedHttpAuthRequest(WebView webView, HttpAuthHandler httpAuthHandler, String s, String s1) {
        super.onReceivedHttpAuthRequest(webView, httpAuthHandler, s, s1);
    }

    /**
     * Gives the app a chance to handle a key event. Returning true prevents
     * the WebView from handling it; the default returns false.
     * @param webView view
     * @param keyEvent event
     * @return whether the app consumed the event
     */
    @Override
    public boolean shouldOverrideKeyEvent(WebView webView, KeyEvent keyEvent) {
        return super.shouldOverrideKeyEvent(webView, keyEvent);
    }

    /**
     * Handles key events not consumed by the WebView (system keys, or keys
     * for which shouldOverrideKeyEvent returned true). Dispatched
     * asynchronously.
     * @param webView view
     * @param keyEvent event
     */
    @Override
    public void onUnhandledKeyEvent(WebView webView, KeyEvent keyEvent) {
        super.onUnhandledKeyEvent(webView, keyEvent);
    }

    /**
     * Android/JS bridge: collects the src of every img tag into an array and
     * wires each image's onclick to window.imagelistener.openImage(src, array)
     * so native code receives the clicked image plus the full list.
     * @param webView webview
     */
    private void addImageArrayClickListener(WebView webView) {
        webView.loadUrl("javascript:(function(){" +
                "var objs = document.getElementsByTagName(\"img\"); " +
                "var array=new Array(); " +
                "for(var j=0;j<objs.length;j++){" +
                "    array[j]=objs[j].src; " +
                "}"+
                "for(var i=0;i<objs.length;i++)  " +
                "{"
                + "    objs[i].onclick=function()  " +
                "    {  "
                + "        window.imagelistener.openImage(this.src,array);  " +
                "    }  " +
                "}" +
                "})()");
    }

    /**
     * Android/JS bridge: wires each img tag's onclick to
     * window.imagelistener.openImage(src) (single-image variant; currently
     * unused but kept for compatibility).
     * @param webView webview
     */
    private void addImageClickListener(WebView webView) {
        webView.loadUrl("javascript:(function(){" +
                "var objs = document.getElementsByTagName(\"img\"); " +
                "for(var i=0;i<objs.length;i++)  " +
                "{"
                + "    objs[i].onclick=function()  " +
                "    {  "
                + "        window.imagelistener.openImage(this.src);  " +
                "    }  " +
                "}" +
                "})()");
    }
}
|
<filename>GitHub/githubsdk/src/main/java/com/prt2121/githubsdk/model/request/EditIssueRequestDTOBak.java
package com.prt2121.githubsdk.model.request;
import com.prt2121.githubsdk.model.response.IssueState;
/**
* Created by Bernat on 15/04/2015.
*/
/**
 * Request payload for editing an issue (backup/alternative DTO).
 * NOTE(review): field names appear to mirror the GitHub "edit issue" API
 * body; serialization of null fields depends on the HTTP layer — confirm.
 */
public class EditIssueRequestDTOBak {
    public String title;       // issue title
    public String body;        // issue body text
    public String assignee;    // login of the user to assign
    public IssueState state;   // issue state (see IssueState)
    public String[] labels;    // label names to apply
}
|
#ER_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/ER_port/bin
#export PATH=$ER_DIR:$PATH
#export PCL_BIN="/home/scott/pcl_merging/pcl_copyin_funcs/build/bin"
#export ER_BIN="/home/scott/s2017/ER_port/bin"
#export PATH=$PCL_BIN:$ER_BIN:$PATH
# Resolve the directory this script lives in and put its bin/ on PATH so the
# pipeline tools (KinfuLS, GlobalRegistration, GraphOptimizer, ...) resolve.
export s2017Dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PATH=$s2017Dir/bin:$PATH
SET_SAMPLES(){
    # Resolve the per-fragment sample count: use $1 when provided,
    # otherwise default to 50 frames per fragment.
    if [ -z "$1" ]; then
        echo "Setting size of samples to 50. (50 frames per fragment)"
        SAMPLES="50"
    else
        echo "Set samples to $1"
        SAMPLES="$1"
    fi
}
SETUP(){
    # Prepare a pipeline run. Currently this only forwards $1 to SET_SAMPLES;
    # the per-ONI working-directory setup below is retained but disabled.
#	ONI=$1
#
#	NAME=$( basename $ONI)
#
    SET_SAMPLES $1
#
#	CURDIR=$(pwd)
#	DIR=$CURDIR/ER.$NAME
#	mkdir -p $WORKDIR
#	cd $WORKDIR
#
#	if [ ! -e $ONI ]; then
#		echo "$ONI is not an absolute path (or doesn't exist)"
#		exit
#	fi
#
#	ln -s $ONI ./in.oni
}
CDDIR(){
    # Change into the working directory. The real logic is disabled, so today
    # this is effectively a no-op that just echoes an empty line.
#	if [ $WORKDIR != "" ]; then
#		cd $WORKDIR
#	else
#		echo "DIR NOT SET, CANNOT CD."
    echo ""
#	fi
}
UGLYHACK(){
    # Work around tools that expect all inputs in one flat directory: create
    # ./hack and symlink every file found two levels up into it.
    echo "Ugly hack run in "`pwd`
    mkdir -p hack
    cd hack
    # Temporarily split 'find' output on newlines only so that paths
    # containing spaces survive the for-loop word splitting.
    OLDIFS=$IFS
    IFS=$(echo -en "\n\b")
    for i in `find ../../ -type f `; do ln -s $i ./ ; done
    cd ..
    IFS=$OLDIFS
}
ER_HELP(){
    # Print the pipeline steps in execution order.
    echo "PCL_KINFU"
    echo "GR"
    echo "GO"
    echo "BC"
    # BUGFIX: the fragment-optimizer step is FO; no FP function exists.
    echo "FO"
    echo "INTEGRATE"
    # BUGFIX: shell function names are case sensitive; the entry point
    # defined in this script is 'Pipeline', not 'PIPELINE'.
    echo "or Pipeline"
    echo "Afterwards, run pcl_kinfu_largeScale_mesh_output <filename>"
    echo " Then in meshlab, import all of them, merge (removing verts/faces), and export."
}
PCL_KINFU(){
    # Step 1: run KinfuLS over the ONI recording, producing per-fragment
    # point clouds and the 100-0.log trajectory under ./kinfu.
    ( time (
        CDDIR
        SET_SAMPLES $1
        rm -rf kinfu
        mkdir -p kinfu
        cd kinfu
        # NOTE(review): '-oni ../' passes a directory, not a file — confirm
        # KinfuLS accepts a directory containing the .oni recording.
        PCL_ARGS=" -r -ic -sd 10 -oni ../ -vs 4 --fragment "$SAMPLES" --rgbd_odometry --record_log ./100-0.log --camera ../cam.param"
#        pcl_kinfu_largeScale $PCL_ARGS
        KinfuLS $PCL_ARGS
        cd ..
    ) ) 2>&1 | tee kinfu_log.txt
}
PSEUDO_KINFU(){
    # Alternative to PCL_KINFU: synthesize a trajectory (genTraj.sh) and run
    # the pseudo-kinfu scripts instead of real KinfuLS tracking.
    ( time (
        CDDIR
        SET_SAMPLES $1
        rm -rf kinfu
        mkdir -p kinfu
        cd kinfu
        genTraj.sh #This is located in the openCV_TRAJ
        pseudo_kinfu.sh
        cd ..
    ) ) 2>&1 | tee pkinfu_log.txt
}
GR(){
    # Step 2: global registration of the kinfu fragments using the recorded
    # trajectory; writes odometry/loop-closure results into ./gr.
    ( time (
        CDDIR
        SET_SAMPLES $1
        rm -rf gr
        mkdir -p gr
        cd gr
        ARGS=" ../kinfu/ ../kinfu/100-0.log $SAMPLES"
        GlobalRegistration $ARGS
        cd ..
    ) ) 2>&1 | tee gr_log.txt
}
GO(){
    # Step 3: pose-graph optimization over the registration results; produces
    # pose.log and reg_refine_all.log in ./go.
    ( time (
        CDDIR
        rm -rf go
        mkdir -p go
        cd go
        ARGS="-w 100 --odometry ../gr/odometry.log --odometryinfo ../gr/odometry.info --loop ../gr/result.txt --loopinfo ../gr/result.info --pose ./pose.log --keep keep.log --refine ./reg_refine_all.log"
        time ( GraphOptimizer $ARGS )
        cd ..
    ) ) 2>&1 | tee go_log.txt
}
BC(){
    # Step 4: build dense correspondences between fragments, reading the
    # refined trajectory from GO via the UGLYHACK symlink directory.
    ( time (
        CDDIR
        rm -rf bc
        mkdir -p bc
        cd bc
        UGLYHACK
        ARGS=" --reg_traj ./hack/reg_refine_all.log --registration --reg_dist 0.05 --reg_ratio 0.25 --reg_num 0 --save_xyzn "
#ARGS=" --reg_traj ./go/reg_refine_all.log --registration --reg_dist 0.05 --reg_ratio 0.25 --reg_num 0 --save_xyzn "
        BuildCorrespondence $ARGS
        echo "BC DONE: $?"
        cd ..
    ) ) 2>&1 | tee bc_log.txt
}
FO(){
    # Step 5: SLAC fragment optimization over the correspondences; writes
    # pose.log and output.ctr, both consumed by INTEGRATE.
    ( time (
        CDDIR
        rm -rf fo
        mkdir -p fo
        cd fo
        # Count the fragment clouds produced by the kinfu step.
        NUMPCDS=$(ls -l ../kinfu/cloud_bin_*.pcd | wc -l | tr -d ' ')
        UGLYHACK
#	ARGS=" --slac --rgbdslam ./hack/init.log --registration ./hack/reg_output.log --dir ./hack/ --num $NUMPCDS --resolution 12 --iteration 10 --length 4.0 --write_xyzn_sample 10"
        ARGS=" --slac --rgbdslam ../gr/init.log --registration ../bc/reg_output.log --dir ./hack/ --num $NUMPCDS --resolution 12 --iteration 10 --length 4.0 --write_xyzn_sample 10"
        FragmentOptimizer $ARGS
        cd ..
        echo "Done fragment"
    ) ) 2>&1 | tee fo_log.txt
}
INTEGRATE(){
    # Step 6: fuse the per-fragment clouds into a single world model.
    # Runs Integrate twice: with the FragmentOptimizer pose/ctr output
    # (fo_world.pcd) and with only the GraphOptimizer poses (go_world.pcd).
    ( time (
        CDDIR
        SET_SAMPLES $1
        rm -rf integrate
        mkdir -p integrate
        cd integrate
        ln -s ../fo ./
        NUMPCDS=$(ls -l ../kinfu/cloud_bin_*.pcd | wc -l | tr -d ' ')
        UGLYHACK
        echo "Running Integrate with Fragment Optimizer"
        ARGS=" --pose_traj ../fo/pose.log --seg_traj ../kinfu/100-0.log --ctr ../fo/output.ctr --num $NUMPCDS --resolution 12 --camera ../cam.param --oni_file ../ --length 4.0 --interval $SAMPLES --save_to fo_world.pcd "
        Integrate $ARGS
        echo "Running Integrate without Fragment Optimizer"
        ARGS=" --pose_traj ../go/pose.log --seg_traj ../kinfu/100-0.log --resolution 12 --camera ../cam.param --oni_file ../ --length 4.0 --interval $SAMPLES --save_to go_world.pcd "
        Integrate $ARGS
        cd ..
    # BUGFIX: was '1>&2 | tee', which sends stdout to stderr and leaves the
    # pipe (and integrate_log.txt) empty; use '2>&1 | tee' like the other steps.
    ) ) 2>&1 | tee integrate_log.txt
}
MESH(){
    # Step 7: extract a mesh from the integrated world cloud, preferring the
    # FragmentOptimizer result when it exists, then merge the mesh chunks
    # with meshlabserver into mesh.ply.
    ( time (
        CDDIR
        SET_SAMPLES $1
        rm -rf mesh
        mkdir -p mesh
        cd mesh
        INFILE=""
        if [ -e ../integrate/fo_world.pcd ]; then
            echo "Running Integrate with Fragment Optimizer"
            INFILE="../integrate/fo_world.pcd"
        else
            echo "Running Integrate without Fragment Optimizer"
            INFILE="../integrate/go_world.pcd"
        fi
        #pcl_kinfu_largeScale_mesh_output $INFILE --volume_size 4
        KinfuLS_meshOut $INFILE --volume_size 4
        # Build the meshlab input list from the generated mesh_ chunks.
        MESHES=""
        for i in `ls | grep mesh_`; do
            MESHES=$MESHES" -i "$i
        done
        meshlabserver -s $s2017Dir/meshlab_merge_meshes_script.mlx -o mesh.ply $MESHES
        cd ..
    # BUGFIX: was '1>&2 | tee', which sends stdout to stderr and leaves the
    # pipe (and mesh_log.txt) empty; use '2>&1 | tee' like the other steps.
    ) ) 2>&1 | tee mesh_log.txt
}
Pipeline() {
    # Full pipeline: kinfu -> global registration -> graph optimization ->
    # correspondences -> fragment optimization -> integration -> meshing.
    # $1 is forwarded to SET_SAMPLES (frames per fragment); $2 is currently
    # unused because SETUP's ONI handling is disabled.
    ( time (
        SETUP $1 $2
        PCL_KINFU $SAMPLES
        GR $SAMPLES
        GO $SAMPLES
        BC $SAMPLES
        FO $SAMPLES
        INTEGRATE $SAMPLES
        # NOTE(review): MESH is called without $SAMPLES, so its SET_SAMPLES
        # resets to 50; MESH never reads SAMPLES, so this looks harmless —
        # confirm.
        MESH
        echo "Pipeline Finished" > status.txt
    ) ) 2>&1 | tee pipeline.log
}
PPipeline() {
    # Pseudo-kinfu variant of Pipeline: identical steps except the first,
    # which synthesizes a trajectory instead of running real KinfuLS.
    ( time (
        SETUP $1 $2
        PSEUDO_KINFU
        GR $SAMPLES
        GO $SAMPLES
        BC $SAMPLES
        FO $SAMPLES
        INTEGRATE $SAMPLES
        MESH
        echo "Pseudo Pipeline Finished" > status.txt
    ) ) 2>&1 | tee pseudo_pipeline.log
}
CompairPipe(){
    # Run both pipeline variants (pseudo-kinfu vs real kinfu) side by side on
    # reflinked copies of the inputs, skipping any variant whose status.txt
    # marker already exists.
    ( time (
        SETUP $1 $2
        #ASSUMES BTRFS partiton w/ copy-on-write:
        rm -rf pseudo
        rm -rf pcl
        FILES=`ls`
        mkdir pseudo
        mkdir pcl
        # NOTE(review): $FILES is an unquoted multi-word list, so './' only
        # prefixes the first entry; this works for bare names in the cwd but
        # breaks on names with spaces or leading dashes — verify.
        cp --reflink -r ./$FILES ./pseudo
        cp --reflink -r ./$FILES ./pcl
        cd pseudo
        if [ ! -e status.txt ] ; then
            PPipeline
        fi
        cd ..
        cd pcl
        if [ ! -e status.txt ] ; then
            Pipeline
        fi
        cd ..
    ) ) 2>&1 | tee compair_log.txt
}
#if [ "$1" == "" ]; then
# echo "Ye need arguments (The oni file in absolute path)"
#else
# Pipeline $1 $2
#fi
|
<reponame>lgoldstein/communitychest
package net.community.chest.apache.httpclient.methods.multipart;
import org.apache.commons.httpclient.methods.multipart.StringPart;
/**
* <P>Copyright 2007 as per GPLv2</P>
*
* <P>Useful class for posting a char-array data value. The default charset is
* same as the JVM, but can be changed via {@link org.apache.commons.httpclient.methods.multipart.PartBase#setCharSet(java.lang.String)}</P>
*
* @author <NAME>.
* @since Oct 10, 2007 12:15:05 PM
*/
public class CharArrayPostPart extends StringPart {
    /**
     * Posts a sub-range of a char buffer as a string part. The chars are
     * converted to a String immediately, so later changes to the buffer do
     * not affect the posted value.
     * @param name parameter name
     * @param b data buffer to be posted - may NOT be null
     * @param off offset of data in buffer to post - may NOT be &lt;0
     * @param len number of elements - may NOT be &lt;=0
     * @throws IndexOutOfBoundsException if illegal buffer data specified
     * @see String#String(char[], int, int)
     */
    public CharArrayPostPart (String name, char[] b, int off, int len) throws IndexOutOfBoundsException
    {
        super(name, new String(b, off, len));
    }
    /**
     * Posts an entire char buffer as a string part.
     * @param name parameter name
     * @param b data buffer to be posted - may NOT be null/empty
     * @throws IndexOutOfBoundsException if illegal buffer data specified
     * @see String#String(char[])
     */
    public CharArrayPostPart (String name, char[] b) throws IndexOutOfBoundsException
    {
        super(name, new String(b));
    }
}
|
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// Name :
// Author : Avi
// Revision : $Revision: #53 $
//
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//
// Description :
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
#include "Node.hpp"
#include "Str.hpp"
#include "NodePath.hpp"
#include "Defs.hpp"
#include "Limit.hpp"
#include "MiscAttrs.hpp"
using namespace ecf;
using namespace std;
using namespace boost;
/// Output a vector to the given stream, elements separated by single spaces.
template<class T>
std::ostream& operator<<(std::ostream& os, const std::vector<T>& v)
{
   // BUGFIX: write to 'os' (the stream passed in), not unconditionally to
   // cout — the original ignored 'os' and always printed to standard output.
   std::copy(v.begin(), v.end(), std::ostream_iterator<T>(os, " "));
   return os;
}
// Look up variable 'name' starting at this node and walking up the tree.
// Search order at each node: user variables, then the repeat (its name acts
// as a variable holding the current repeat value), then generated variables.
// Falls back to the defs/server environment last. Returns true and fills
// 'theValue' on success.
bool Node::findParentVariableValue(const std::string& name, std::string& theValue) const
{
   if (!vars_.empty() && findVariableValue(name,theValue)) return true;
   if (!repeat_.empty() && repeat_.name() == name) {
      theValue = repeat_.valueAsString(); return true;
   }
   if (findGenVariableValue(name,theValue)) return true;

   // Repeat the same three-way lookup (user vars, repeat, generated vars)
   // on every ancestor, nearest first.
   Node* theParent = parent();
   while (theParent) {
      if (theParent->findVariableValue(name,theValue)) return true;
      const Repeat& rep = theParent->repeat();
      if (!rep.empty() && rep.name() == name) {
         theValue = rep.valueAsString(); return true;
      }
      if (theParent->findGenVariableValue(name,theValue)) return true;
      theParent = theParent->parent();
   }

   // If all else fails search defs environment, returns empty string if match not found
   // The defs environment is constructed via:
   //   o/ default settings for ECF_HOME,ECF_LOG, ECF_CHECK,ECF_CHECKOLD,ECF_CHECKINTERVAL
   //      ECF_INTERVAL ECF_CHECKMODE ECF_JOB_CMD ECF_MICRO ECF_TRIES ECF_PORT, ECF_HOST
   //   o/ These values are updated from the server environment when the BEGIN cmd is called.
   Defs* the_defs = defs();
   if ( the_defs ) {
      theValue = the_defs->server().find_variable(name);
      if ( !theValue.empty() ) return true;
   }
   return false; // the variable cannot be found
}
bool Node::find_parent_gen_variable_value(const std::string& name, std::string& theValue) const
{
if (findGenVariableValue(name,theValue)) return true;
Node* theParent = parent();
while (theParent) {
if (theParent->findGenVariableValue(name,theValue)) return true;
theParent = theParent->parent();
}
// If all else fails search defs environment, returns empty string if match not found
// The defs environment is constructed via:
// o/ default settings for ECF_HOME,ECF_LOG, ECF_CHECK,ECF_CHECKOLD,ECF_CHECKINTERVAL
// ECF_INTERVAL ECF_CHECKMODE ECF_JOB_CMD ECF_MICRO ECF_TRIES ECF_PORT, ECF_HOST
// o/ These values are updated from the server environment when the BEGIN cmd is called.
Defs* the_defs = defs();
if ( the_defs ) {
theValue = the_defs->server().find_variable(name);
if ( !theValue.empty() ) return true;
}
return false; // the variable cannot be found
}
bool Node::findParentUserVariableValue(const std::string& name, std::string& theValue) const
{
if (findVariableValue(name,theValue)) return true;
Node* theParent = parent();
while (theParent) {
if (theParent->findVariableValue(name,theValue)) return true;
theParent = theParent->parent();
}
// If all else fails search defs environment, returns empty string if match not found
Defs* the_defs = defs();
if ( the_defs ) {
// Note: when calling ecflow_client --get_state=/suite/task
// The node can be detached from the defs.
theValue = the_defs->server().find_variable(name);
if ( !theValue.empty() ) return true;
}
return false; // the variable cannot be found
}
const std::string& Node::find_parent_user_variable_value(const std::string& name) const
{
const Variable& var = findVariable(name);
if (!var.empty()) return var.theValue();
Node* theParent = parent();
while (theParent) {
const Variable& pvar = theParent->findVariable(name);
if (!pvar.empty()) return pvar.theValue();
theParent = theParent->parent();
}
Defs* the_defs = defs();
if ( the_defs ) {
// Note: when calling ecflow_client --get_state=/suite/task
// The node can be detached from the defs.
return the_defs->server().find_variable(name);
}
return Str::EMPTY();
}
bool Node::user_variable_exists(const std::string& name) const
{
const Variable& var = findVariable(name);
if (!var.empty()) return true;
Node* theParent = parent();
while (theParent) {
const Variable& pvar = theParent->findVariable(name);
if (!pvar.empty()) return true;
theParent = theParent->parent();
}
// If all else fails search defs environment, returns empty string if match not found
Defs* the_defs = defs();
if ( the_defs ) {
// Note: when calling ecflow_client --get_state=/suite/task
// The node can be detached from the defs.
return the_defs->server().variable_exists(name);
}
return false;
}
const Variable& Node::findVariable(const std::string& name) const
{
    // Linear scan: the per-node variable list is expected to be short.
    for (auto it = vars_.begin(), theEnd = vars_.end(); it != theEnd; ++it) {
        if (it->name() == name) return *it;
    }
    return Variable::EMPTY();
}
std::string Node::find_parent_variable_sub_value(const std::string& name) const
{
std::string ret;
const Variable& var = findVariable(name);
if (!var.empty()) {
ret = var.theValue();
variableSubsitution(ret);
return ret;
}
Node* theParent = parent();
while (theParent) {
const Variable& pvar = theParent->findVariable(name);
if (!pvar.empty()) {
ret = pvar.theValue();
variableSubsitution(ret);
return ret;
}
theParent = theParent->parent();
}
// If all else fails search defs environment
Defs* the_defs = defs();
if ( the_defs ) {
const Variable& pvar = the_defs->server().findVariable(name);
ret = pvar.theValue();
the_defs->variableSubsitution(ret);
return ret;
}
return string();
}
const Variable& Node::find_parent_variable(const std::string& name) const
{
    // First matching variable on this node or any ancestor.
    for (const Node* node = this; node; node = node->parent()) {
        const Variable& found = node->findVariable(name);
        if (!found.empty()) return found;
    }

    // If all else fails search defs environment
    Defs* the_defs = defs();
    if (the_defs) {
        return the_defs->server().findVariable(name);
    }
    return Variable::EMPTY();
}
bool Node::findVariableValue( const std::string& name, std::string& returnedValue) const
{
for(const auto& var: vars_) {
if (var.name() == name) {
returnedValue = var.theValue();
return true;
}
}
return false;
}
bool Node::findGenVariableValue( const std::string& name, std::string& returnedValue) const
{
const Variable& genVar = findGenVariable(name);
if (!genVar.empty()) {
returnedValue = genVar.theValue();
return true;
}
return false;
}
bool Node::findLimit(const Limit& theLimit) const
{
    // Limits are matched by name only; other limit state is ignored.
    for (size_t i = 0; i < limits_.size(); ++i) {
        if (limits_[i]->name() == theLimit.name()) return true;
    }
    return false;
}
limit_ptr Node::find_limit(const std::string& theName) const
{
    // Returns the named limit on *this* node, or a null limit_ptr.
    for (size_t i = 0; i < limits_.size(); ++i) {
        if (limits_[i]->name() == theName) return limits_[i];
    }
    return limit_ptr();
}
limit_ptr Node::findLimitUpNodeTree(const std::string& name) const
{
    // Search this node first, then each ancestor, for the named limit.
    for (const Node* node = this; node != nullptr; node = node->parent()) {
        limit_ptr found = node->find_limit(name);
        if (found) return found;
    }
    return limit_ptr();
}
const Event& Node::findEvent(const Event& theEvent) const
{
    // compare ignores state like value_ and initial_value
    for (auto it = events_.begin(); it != events_.end(); ++it) {
        if (it->compare(theEvent)) return *it;
    }
    return Event::EMPTY();
}
const Event& Node::findEventByNumber(int number) const
{
    // Events may be addressed by numeric id as well as by name.
    for (auto it = events_.begin(); it != events_.end(); ++it) {
        if (it->number() == number) return *it;
    }
    return Event::EMPTY();
}
const Event& Node::findEventByName( const std::string& event_name) const
{
    // Name lookup only; use findEventByNameOrNumber() for the combined search.
    for (auto it = events_.begin(); it != events_.end(); ++it) {
        if (it->name() == event_name) return *it;
    }
    return Event::EMPTY();
}
// Look up an event first by name; if that fails and the string starts with a
// digit, retry interpreting it as an event number. Returns Event::EMPTY()
// when neither lookup succeeds.
const Event& Node::findEventByNameOrNumber( const std::string& theName) const
{
    const Event& event = findEventByName(theName);
    if (!event.empty()) {
        return event;
    }
    // Test for numeric, and then casting, is ****faster***** than relying on exception alone
    if ( theName.find_first_of( Str::NUMERIC(), 0 ) == 0 ) {
        try {
            auto eventNumber = boost::lexical_cast< int >( theName );
            return findEventByNumber(eventNumber);
        }
        // Swallow the cast failure deliberately: a non-numeric suffix (e.g. "1abc")
        // simply means there is no event by that number.
        catch ( boost::bad_lexical_cast&) {}
    }
    return Event::EMPTY();
}
const Meter& Node::findMeter(const std::string& name) const
{
    // Const lookup of a meter by name on *this* node only.
    for (auto it = meters_.begin(); it != meters_.end(); ++it) {
        if (it->name() == name) return *it;
    }
    return Meter::EMPTY();
}
Meter& Node::find_meter(const std::string& name)
{
    // Mutable variant of findMeter(). The const_cast on the shared EMPTY()
    // sentinel mirrors the existing project pattern; callers must not write
    // through the returned reference when it is the sentinel.
    for (auto it = meters_.begin(); it != meters_.end(); ++it) {
        if (it->name() == name) return *it;
    }
    return const_cast<Meter&>(Meter::EMPTY());
}
bool Node::findLabel(const std::string& name) const
{
    // Existence check only; use find_label() to obtain the attribute itself.
    for (auto it = labels_.begin(); it != labels_.end(); ++it) {
        if (it->name() == name) return true;
    }
    return false;
}
const Label& Node::find_label(const std::string& name) const
{
    // Returns Label::EMPTY() when no label of this name exists on the node.
    for (auto it = labels_.begin(); it != labels_.end(); ++it) {
        if (it->name() == name) return *it;
    }
    return Label::EMPTY();
}
bool Node::findVerify(const VerifyAttr& v) const
{
    // Verify attributes live on the optional misc_attrs_ aggregate.
    return misc_attrs_ ? misc_attrs_->findVerify(v) : false;
}
const QueueAttr& Node::find_queue(const std::string& name) const
{
    // Queues are stored on the optional misc_attrs_ aggregate.
    return misc_attrs_ ? misc_attrs_->find_queue(name) : QueueAttr::EMPTY();
}
QueueAttr& Node::findQueue(const std::string& name)
{
    // Mutable variant; EMPTY1() is the project's writable sentinel.
    return misc_attrs_ ? misc_attrs_->findQueue(name) : QueueAttr::EMPTY1();
}
const GenericAttr& Node::find_generic(const std::string& name) const
{
    // Generic attributes live on the optional misc_attrs_ aggregate.
    return misc_attrs_ ? misc_attrs_->find_generic(name) : GenericAttr::EMPTY();
}
const Repeat& Node::findRepeat(const std::string& name) const
{
    // A node holds at most one repeat; match it by name.
    if (repeat_.empty() || repeat_.name() != name) return Repeat::EMPTY();
    return repeat_;
}
// Used during trigger/complete expression validation: returns true when 'name'
// resolves to any attribute on *this* node that an expression may reference.
// The search order (event, meter, user variable, repeat, generated variable,
// limit, queue) must stay in step with findExprVariableValue() and friends.
// Side effect: matching events/meters/queues are flagged as used-in-trigger.
bool Node::findExprVariable( const std::string& name)
{
    // if event found return true. also mark this event so simulator know its used in a trigger
    if ( set_event_used_in_trigger(name) ) {
        return true;
    }
    // if meter found mark as used in trigger for simulator and return
    if ( set_meter_used_in_trigger(name) ) {
        return true;
    }
    const Variable& user_variable = findVariable( name );
    if (!user_variable.empty()) return true;
    const Repeat& repeat = findRepeat( name );
    if (!repeat.empty()) return true;
    const Variable& gen_variable = findGenVariable( name );
    if (!gen_variable.empty()) return true;
    limit_ptr limit = find_limit( name );
    if (limit.get()) return true;
    QueueAttr& queue_attr = findQueue( name );
    if (!queue_attr.empty()) {
        queue_attr.set_used_in_trigger(true);
        return true;
    }
    return false;
}
// Resolve 'name' to an integer for trigger expression evaluation.
// Search order must match findExprVariable(); returns 0 when nothing matches.
int Node::findExprVariableValue( const std::string& name) const
{
    const Event& event = findEventByNameOrNumber( name );
    if ( !event.empty() ) return (event.value() ? 1 : 0);
    const Meter& meter = findMeter( name );
    if ( !meter.empty() ) return meter.value();
    const Variable& variable = findVariable( name );
    if ( !variable.empty() ) return variable.value();
    const Repeat& repeat = findRepeat( name );
    if ( !repeat.empty() ) {
        // RepeatDate last_valid_value() returns the date by its real value as a long
        // RepeatDatelist last_valid_value() returns the date by its real value as a long
        // RepeatInteger last_valid_value() returns the value, by the current value of integer
        // RepeatEnumerated last_valid_value() returns the current value if cast-able to integer otherwise position/index,
        // (i.e. since enum can be anything)
        // RepeatString last_valid_value() returns the current position/index ( Alternatives ? )
        // RepeatDay last_valid_value() returns the current step
        // Note: At Repeat expiration Repeat::value() may be out of range of start-end
        // But Repeat::last_valid_value() should always be in range, hence at Repeat expiration
        // will return the last valid value.
        return repeat.last_valid_value();
    }
    const Variable& gen_variable = findGenVariable( name );
    if ( !gen_variable.empty() ) return gen_variable.value();
    limit_ptr limit = find_limit( name );
    if (limit.get()) return limit->value();
    const QueueAttr& queue_attr = find_queue( name );
    if ( !queue_attr.empty() ) return queue_attr.index_or_value();
    return 0;
}
// As findExprVariableValue() but adds 'val' to the resolved value; for repeats
// the addition is delegated to last_valid_value_plus() so date arithmetic is
// handled by the repeat itself. When nothing matches, returns 'val'.
int Node::findExprVariableValueAndPlus(const std::string& name, int val) const
{
    const Event& event = findEventByNameOrNumber( name );
    if ( !event.empty() ) return ((event.value() ? 1 : 0) + val);
    const Meter& meter = findMeter( name );
    if ( !meter.empty() ) return (meter.value() + val);
    const Variable& variable = findVariable( name );
    if ( !variable.empty() ) return (variable.value() + val);
    const Repeat& repeat = findRepeat( name );
    if ( !repeat.empty() ) {
        // RepeatDate last_valid_value() returns the date by its real value as a long
        // RepeatInteger last_valid_value() returns the value, by the current value of integer
        // RepeatEnumerated last_valid_value() returns the current value if cast-able as integer otherwise position/index,
        // (i.e. since enum can be anything)
        // RepeatString last_valid_value() returns the current position/index ( Alternatives ? )
        // RepeatDay last_valid_value() returns the current step
        // Note: At Repeat expiration Repeat::value() may be out of range of start-end
        // But Repeat::last_valid_value() should always be in range, hence at Repeat expiration
        // will return the last valid value.
        return repeat.last_valid_value_plus(val);
    }
    const Variable& gen_variable = findGenVariable( name );
    if ( !gen_variable.empty() ) return (gen_variable.value()+val);
    limit_ptr limit = find_limit( name );
    if (limit.get()) return (limit->value() + val);
    const QueueAttr& queue_attr = find_queue( name );
    if ( !queue_attr.empty() ) return (queue_attr.index_or_value() + val );
    return val;
}
// As findExprVariableValue() but subtracts 'val'; repeats delegate to
// last_valid_value_minus() for correct date arithmetic. When nothing
// matches, returns -val.
int Node::findExprVariableValueAndMinus(const std::string& name, int val) const
{
    const Event& event = findEventByNameOrNumber( name );
    if ( !event.empty() ) return ( (event.value() ? 1 : 0) - val);
    const Meter& meter = findMeter( name );
    if ( !meter.empty() ) return (meter.value() - val );
    const Variable& variable = findVariable( name );
    if ( !variable.empty() ) return (variable.value() - val);
    const Repeat& repeat = findRepeat( name );
    if ( !repeat.empty() ) {
        // RepeatDate last_valid_value() returns the date by its real value as a long
        // RepeatInteger last_valid_value() returns the value, by the current value of integer
        // RepeatEnumerated last_valid_value() returns the current value if cast-able as integer, else position/index, (
        // i.e. since enum can be anything)
        // RepeatString last_valid_value() returns the current position/index ( Alternatives ? )
        // RepeatDay last_valid_value() returns the current step
        // Note: At Repeat expiration Repeat::value() may be out of range of start-end
        // But Repeat::last_valid_value() should always be in range, hence at Repeat expiration
        // will return the last valid value.
        return repeat.last_valid_value_minus(val);
    }
    const Variable& gen_variable = findGenVariable( name );
    if ( !gen_variable.empty() ) return (gen_variable.value() - val);
    limit_ptr limit = find_limit( name );
    if (limit.get()) return (limit->value() - val);
    const QueueAttr& queue_attr = find_queue( name );
    if ( !queue_attr.empty() ) return (queue_attr.index_or_value() - val );
    return -val;
}
// Resolve 'name' as findExprVariableValue() does, additionally reporting which
// attribute kind matched via 'varType' (e.g. "event", "meter", "repeat", ...).
// Sets varType to "variable-not-found" and returns 0 when nothing matches.
int Node::findExprVariableValueAndType( const std::string& name, std::string& varType) const
{
    const Event& event = findEventByNameOrNumber( name );
    if ( !event.empty() ) {
        varType = "event";
        return (event.value() ? 1 : 0);
    }
    const Meter& meter = findMeter( name );
    if ( !meter.empty() ) {
        varType = "meter";
        return meter.value();
    }
    const Variable& variable = findVariable( name );
    if ( !variable.empty() ) {
        varType = "user-variable";
        return variable.value();
    }
    const Repeat& repeat = findRepeat( name );
    if ( !repeat.empty() ) {
        varType = "repeat";
        return repeat.last_valid_value();
    }
    const Variable& gen_variable = findGenVariable( name );
    if ( !gen_variable.empty() ) {
        varType = "gen-variable";
        return gen_variable.value();
    }
    limit_ptr limit = find_limit( name );
    if (limit.get()) {
        varType = "limit";
        return limit->value();
    }
    const QueueAttr& queue_attr = find_queue( name );
    if ( !queue_attr.empty() ) {
        varType = "queue";
        return queue_attr.index_or_value();
    }
    varType = "variable-not-found";
    return 0;
}
// Diagnostic helper: resolve 'name' using the same precedence as
// findExprVariableValue() and print the matching attribute kind and value
// to 'os'. Prints nothing when no attribute matches.
// NOTE(review): unlike the value variants this prints repeat.value(), not
// last_valid_value() — presumably intentional for diagnostics; confirm.
void Node::findExprVariableAndPrint( const std::string& name, ostream& os) const
{
    const Event& event = findEventByNameOrNumber( name );
    if ( !event.empty() ) {
        os << "EVENT value(" << event.value() << ")";
        return;
    }
    const Meter& meter = findMeter( name );
    if ( !meter.empty() ) {
        os << "METER value(" << meter.value() << ")";
        return;
    }
    const Variable& variable = findVariable( name );
    if ( !variable.empty() ) {
        os << "USER-VARIABLE value(" << variable.value() << ")";
        return;
    }
    const Repeat& repeat = findRepeat( name );
    if ( !repeat.empty() ) {
        os << "REPEAT value(" << repeat.value() << ")";
        return;
    }
    const Variable& gen_variable = findGenVariable( name );
    if ( !gen_variable.empty() ) {
        os << "GEN-VARIABLE value(" << gen_variable.value() << ")";
        return;
    }
    limit_ptr limit = find_limit( name );
    if (limit.get()) {
        os << limit->toString() << " value(" << limit->value() << ")";
        return;
    }
    const QueueAttr& queue_attr = find_queue( name );
    if ( !queue_attr.empty() ) {
        os << "QUEUE " << queue_attr.name() << " value(" << queue_attr.index_or_value() << ")";
        return;
    }
}
// Free helper for resolving a *relative* node reference used in a trigger.
// A single-component path is resolved against triggerNode itself (self
// reference) or its siblings; multi-component paths are resolved relative to
// the parent first, then to triggerNode. On failure, errorMsg is populated
// and a null node_ptr is returned.
node_ptr findRelativeNode( const vector<std::string>& theExtractedPath,node_ptr triggerNode, std::string& errorMsg )
{
    // The referenced node could be itself(error) or most likely a sibling node.
    auto extractedPathSize = static_cast<int>(theExtractedPath.size());
    if (extractedPathSize == 1 && triggerNode->name() == theExtractedPath[0]) {
        // self referencing node ?
        return triggerNode;
    }

    // Can only find *sibling* if triggerNode has a parent
    if (!triggerNode->parent()) {
        errorMsg = "Parent empty. Could not find referenced node\n";
        return node_ptr();
    }

    if (extractedPathSize == 1) {
        size_t child_pos; // not used
        node_ptr theNode = triggerNode->parent()->findImmediateChild(theExtractedPath[0],child_pos);
        if ( theNode.get() ) {
            return theNode;
        }
    }
    else {
#ifdef DEBUG_FIND_REFERENCED_NODE
        cout << "triggerNode: " << triggerNode->debugNodePath() << "\n";
        cout << "triggerNode->parent(): " << triggerNode->parent()->debugNodePath() << "\n";
#endif
        // Try resolving relative to the parent first, then to the node itself.
        node_ptr constNode = triggerNode->parent()->find_relative_node(theExtractedPath);
        if (constNode.get()) {
            return constNode;
        }
        constNode = triggerNode->find_relative_node(theExtractedPath);
        if (constNode.get()) {
            return constNode;
        }
    }

    // Build a descriptive error message for the caller.
    errorMsg = "Could not find node '";
    if (extractedPathSize == 1) errorMsg += theExtractedPath[0];
    else { for(const string& s: theExtractedPath) { errorMsg += s; errorMsg += Str::PATH_SEPERATOR();} }
    errorMsg += "' from node ";
    errorMsg += triggerNode->absNodePath();
    if (extractedPathSize == 1) {
        errorMsg += " . Expected '";
        errorMsg += theExtractedPath[0];
        errorMsg += "' to be a sibling.";
    }
    errorMsg += "\n";
    return node_ptr();
}
// Obtain a non-const shared_ptr to this node from a const member function;
// shared_from_this() on a const object yields a pointer-to-const, so cast.
node_ptr Node::non_const_this() const {
    return const_pointer_cast<Node>(shared_from_this());
}
// Convenience overload: reference resolution without an extern object name.
node_ptr Node::findReferencedNode(const std::string& nodePath, std::string& errorMsg) const
{
    return findReferencedNode(nodePath,Str::EMPTY(),errorMsg);
}
//#define DEBUG_FIND_REFERENCED_NODE 1
// Resolve a node reference (from a trigger/complete expression or an inlimit
// path) to a node_ptr. Handles absolute paths, sibling names, "./" and "../"
// relative paths, and client-side externs. Returns a null node_ptr on
// failure; errorMsg is populated unless the path matched an extern.
// Fix: the "Could not find referenced node, using absolute path" message
// opened a quote that was never closed.
node_ptr Node::findReferencedNode(const std::string& nodePath, const std::string& extern_obj, std::string& errorMsg) const
{
#ifdef DEBUG_FIND_REFERENCED_NODE
    cout << "Node::findReferencedNode path:" << nodePath << " extern_obj:" << extern_obj << "\n";
#endif
    Defs* theDefs = defs();
    if (!theDefs) {
        // In the case where we have a stand alone Node. i.e no parent set. The Defs will be NULL.
        // Take the case where we want to dump the state of a single node.
        //      ecflow_client --get_state=/test/f2 --port=4141
        // Here we are printing the state of the Node only, *NO* defs is returned.
        // The print will cause the AST to be evaluated. The trigger evaluation will required chasing
        // down reference nodes. Hence we will end up here. Rather than crashing, just return a NULL Pointer.
        return node_ptr();
    }

    /// findReferencedNode:: is used to locate references:
    ///        a/ Trigger & complete expressions, this is where extern_obj is used.
    ///        b/ Inlimit nodepaths.
    /// In *both* the case above, the node path may not exist, in the definition. Hence::
    ///   On client side:: references not defined in externs are considered errors
    ///   On server side:: No extern's are stored, hence for unresolved node paths, we return NULL
#ifdef DEBUG_FIND_REFERENCED_NODE
    string debug_path = "Searching for path " + nodePath + " from " + debugType() + Str::COLON() + absNodePath() + "\n";
#endif

    // if an absolute path cut in early
    if (!nodePath.empty() && nodePath[0] == '/') {
#ifdef DEBUG_FIND_REFERENCED_NODE
        debug_path += "(!nodePath.empty() && nodePath[0] == '/') \n";
#endif
        // Must be a absolute path. i.e /suite/family/path
        node_ptr constNode = theDefs->findAbsNode(nodePath);
        if (constNode.get()) {
            return constNode;
        }

        // *NOTE*: The server does *NOT* store externs, hence the check below, will always return false, for the server:
        // Must be an extern:
        //   extern /referenceSuite/family/task:obj
        //   extern /referenceSuite/family/task
        if (theDefs->find_extern(nodePath,extern_obj)) {
            // =================================================================
            // **Client side* specific: Only client side defs, stores extrens
            // =================================================================
#ifdef DEBUG_FIND_REFERENCED_NODE
            debug_path += "theDefs->find_extern(nodePath) \n";
#endif
            // return NULL *without* setting an error message as node path is defined as an extern
            // OK: the node path appears in the extern list. This may be because that suite has not been loaded.
            // *** If the suite is loaded, then its an error that we did not
            // *** locate the node. i.e in the previous call to defs->findAbsNode(nodePath);
            vector<string> theExtractedPath;
            NodePath::split(nodePath, theExtractedPath );
            std::string referenceSuite = theExtractedPath[0];
            if (theDefs->findSuite(referenceSuite)) {
                // The suite referenced in the extern is LOADED, but path did not resolve,
                // in previous call to defs->findAbsNode(nodePath);
                errorMsg = "Extern path ";
                errorMsg += nodePath;
                errorMsg += " does not exist on suite ";
                errorMsg += referenceSuite;
                errorMsg += "\n";
#ifdef DEBUG_FIND_REFERENCED_NODE
                errorMsg += debug_path;
#endif
            }
            // Its an extern path that references a suite thats NOT loaded yet
            return node_ptr();
        }
        errorMsg = ": Could not find referenced node, using absolute path '";
        errorMsg += nodePath;
        errorMsg += "'\n"; // close the quote opened above (was missing)
        return node_ptr();
    }

    /// =============================================================================
    /// Path is something other than ABSOLUTE path
    /// =============================================================================
    vector<string> theExtractedPath;
    NodePath::split( nodePath, theExtractedPath );
#ifdef DEBUG_FIND_REFERENCED_NODE
    debug_path += "extracted path = ";
    for(const string& s: theExtractedPath) { debug_path += ",";debug_path += s;}
    debug_path += "\n";
#endif

    if ( theExtractedPath.empty() ) {
        std::stringstream ss;
        ss << ": Could not find referenced node '" << nodePath << "' from node " << absNodePath() << "\n";
        errorMsg = ss.str();
#ifdef DEBUG_FIND_REFERENCED_NODE
        errorMsg += debug_path;
#endif
        return node_ptr();
    }

    // i.e  " a == complete"  =====> nodePath = a
    if ( theExtractedPath.size() == 1) {
#ifdef DEBUG_FIND_REFERENCED_NODE
        debug_path += "( theExtractedPath.size() == 1)\n";
#endif
        // Search for a relative node first
        string localErrorMsg;
        node_ptr res = findRelativeNode(theExtractedPath,non_const_this(),localErrorMsg);
#ifdef DEBUG_FIND_REFERENCED_NODE
        if (!localErrorMsg.empty()) {
            debug_path += localErrorMsg;
            localErrorMsg = debug_path;
        }
#endif
        if (!res.get()) {
            // lets see if its in an extern Node. extern can have absolute and relative paths
            // In this case it will be a relative path, hence no point trying to see if suite
            // is loaded
            if (theDefs->find_extern(nodePath,extern_obj)) {
                // =================================================================
                // **Client side* specific: Only client side defs, stores externs
                // =================================================================
                // The path exist in the extern and we know its relative
                return node_ptr();
            }
        }
        errorMsg += localErrorMsg;
        return res;
    }

    // handle Node path of type a/b/c
    if (theExtractedPath.size() >= 2 && theExtractedPath[0] != "." && theExtractedPath[0] != "..") {
#ifdef DEBUG_FIND_REFERENCED_NODE
        debug_path += "(theExtractedPath.size() >= 2 && theExtractedPath[0] != \".\") && theExtractedPath[0] != \"..\"\n";
#endif
        // First check to see see if its in the externs
        if (theDefs->find_extern(nodePath,extern_obj)) {
            // =================================================================
            // **Client side* specific: Only client side defs, stores externs
            // =================================================================
            // The path a/b/c exist in the extern and we know its relative
            // Again no point in checking to see if suite is loaded if path is relative
            return node_ptr();
        }
        // In this case its equivalent to: ./a/b/c
        theExtractedPath.insert(theExtractedPath.begin(),".");
    }

    // node path == "./a"
    if ( theExtractedPath.size() >= 2 && theExtractedPath[0] == ".") {
#ifdef DEBUG_FIND_REFERENCED_NODE
        debug_path += "theExtractedPath.size() == 2 && theExtractedPath[0] == \".\" \n";
#endif
        theExtractedPath.erase( theExtractedPath.begin() + 0);
        node_ptr res = findRelativeNode(theExtractedPath,non_const_this(),errorMsg);
#ifdef DEBUG_FIND_REFERENCED_NODE
        if (!errorMsg.empty()) {
            debug_path += errorMsg;
            errorMsg = debug_path;
        }
#endif
        return res;
    }

    // ********************************************************************
    // Note ./   sibling go to parent and search down
    //      ../  search at level of parent, requires we go up to parents parent to search down
    // **********************************************************************
    // Handle node path of the type "../a/b/c"   "../../a/b/c"
    if ( theExtractedPath.size() >= 2 && theExtractedPath[0] == "..") {
#ifdef DEBUG_FIND_REFERENCED_NODE
        debug_path += "( theExtractedPath.size() >= 2 && theExtractedPath[0] == \"..\")\n";
#endif
        Node* theParent = parent(); // get past the first parent
        while ( static_cast<int>(theExtractedPath.size()) &&
                theParent &&
                theExtractedPath[0] == ".." )
        {
            theExtractedPath.erase( theExtractedPath.begin() + 0);
            theParent = theParent->parent(); // for each .. go up a parent
#ifdef DEBUG_FIND_REFERENCED_NODE
            debug_path += "..: thepParent = ";
            if (theParent) debug_path += theParent->absNodePath();
            else debug_path += "NULL";
#endif
        }
        if ( theParent ) {
#ifdef DEBUG_FIND_REFERENCED_NODE
            debug_path += "searching = " + theParent->name() + "\n";
            for(const std::string& s : theExtractedPath) { debug_path += Str::PATH_SEPERATOR() + s; }
            debug_path += "\n";
#endif
            node_ptr constNode = theParent->find_relative_node(theExtractedPath);
            if (constNode) {
                return constNode;
            }
        }
        else {
            // search suites, with the remaining path in theExtractedPath
            std::string path;
            for(const auto & i : theExtractedPath) {
                path += Str::PATH_SEPERATOR();
                path += i;
            }
            node_ptr fndNode = theDefs->findAbsNode(path);
            if (fndNode) {
                return fndNode;
            }
        }
    }

    errorMsg = "Unrecognised path ";
    errorMsg += nodePath;
    errorMsg += " for Node ";
    errorMsg += absNodePath();
    errorMsg += "\n";
#ifdef DEBUG_FIND_REFERENCED_NODE
    errorMsg += debug_path;
#endif
    return node_ptr();
}
const ZombieAttr& Node::findZombie( ecf::Child::ZombieType zombie_type) const
{
    // Zombie attributes live on the optional misc_attrs_ aggregate.
    return misc_attrs_ ? misc_attrs_->findZombie(zombie_type) : ZombieAttr::EMPTY();
}
bool Node::findParentZombie(ecf::Child::ZombieType z_type, ZombieAttr& z) const
{
    // Search this node, then each ancestor, for a zombie attribute of the
    // given type; copy the first match into 'z'.
    for (const Node* node = this; node; node = node->parent()) {
        const ZombieAttr& attr = node->findZombie(z_type);
        if (!attr.empty()) {
            z = attr;
            return true;
        }
    }
    return false;
}
|
#!/bin/bash
##
## Start installation of macOS Workstation with Ansible
##
# Abort on the first failing command / unset variable / failed pipe stage;
# previously a failed download or install was silently ignored and every
# later step ran against a broken environment.
set -euo pipefail

repo="macos-setup-ansible"

# Install ansible into an isolated user base so the system python is untouched.
export PYTHONUSERBASE="${HOME}/.ansible-bootstrap"
export PATH="${PYTHONUSERBASE}/bin:${PATH}"

# Refresh sudo credentials up front and raise the open-file limit.
sudo -v
sudo launchctl limit maxfiles unlimited

# NOTE(review): easy_install is deprecated and removed on recent macOS;
# consider 'python3 -m ensurepip --user' or a python.org installer instead.
sudo easy_install pip
pip install --user ansible

# Fetch the playbook repo as a tarball into ./${repo}.
mkdir -p "${repo}"
cd "${repo}"
# -f makes curl fail on HTTP errors instead of piping an error page into tar.
curl -fL "https://github.com/stephenulmer/${repo}/tarball/master" \
  | tar -xf - --strip-components 1

ansible-galaxy install -r requirements.yml
ansible-playbook -i localhost, --connection=local setup.yml
|
#!/bin/bash
# Installs Oracle Java 6/7/8 (Java 8 as default) and Scala 2.11.8.
# Abort on the first failure; previously every step ran even after a
# failed add-apt-repository or download.
set -e

# NOTE(review): the webupd8team PPA Oracle installers are discontinued
# upstream; this will fail on current Ubuntu releases — consider OpenJDK.
echo "================================="
echo "== Adding Java to install list =="
echo "================================="
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update

echo "====================="
echo "== Installing Java8=="
echo "====================="
sudo apt-get install oracle-java8-installer

echo "====================="
echo "== Installing Java7=="
echo "====================="
sudo apt-get install oracle-java7-installer

echo "======================"
echo "== Installing Java6 =="
echo "======================"
sudo apt-get install oracle-java6-installer

echo "=============================="
echo "== Setting Java8 as default =="
echo "=============================="
sudo apt-get install oracle-java8-set-default

echo "============================="
echo "== Downloading Scala 2.11.8=="
echo "============================="
sudo wget "http://www.scala-lang.org/files/archive/scala-2.11.8.deb"

echo "============================"
echo "== Installing Scala 2.11.8=="
echo "============================"
sudo dpkg -i scala-2.11.8.deb
# The .deb is a regular file: -f (force), not -r (recursive), is the right flag.
sudo rm -f scala-2.11.8.deb
|
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common utilites for kube-up/kube-down
set -o errexit
set -o nounset
set -o pipefail

# Resolve the repository root relative to this script. The inner command
# substitution must be quoted (SC2046): an unquoted $(dirname ...) word-splits
# on paths containing spaces and breaks the cd.
KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)
DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config"

source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/lib/logging.sh"

# KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4"
#
# NOTE This must match the version_regex in build/common.sh
# kube::release::parse_and_validate_release_version()
KUBE_RELEASE_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*))?$"
KUBE_RELEASE_VERSION_DASHED_REGEX="v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)-(0|[1-9][0-9]*))?"

# KUBE_CI_VERSION_REGEX matches things like "v1.2.3-alpha.4.56+abcdefg" This
#
# NOTE This must match the version_regex in build/common.sh
# kube::release::parse_and_validate_ci_version()
KUBE_CI_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*)\\+[-0-9a-z]*)?$"
KUBE_CI_VERSION_DASHED_REGEX="^v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-([a-zA-Z0-9]+)-(0|[1-9][0-9]*)(-(0|[1-9][0-9]*)\\+[-0-9a-z]*)?"
# Generate kubeconfig data for the created cluster.
# Assumed vars:
# KUBE_USER
# KUBE_PASSWORD
# KUBE_MASTER_IP
# KUBECONFIG
# CONTEXT
#
# If the apiserver supports bearer auth, also provide:
# KUBE_BEARER_TOKEN
#
# If the kubeconfig context being created should NOT be set as the current context
# SECONDARY_KUBECONFIG=true
#
# To explicitly name the context being created, use OVERRIDE_CONTEXT
#
# The following can be omitted for --insecure-skip-tls-verify
# KUBE_CERT
# KUBE_KEY
# CA_CERT
# Write cluster/credential/context entries for ${CONTEXT} into ${KUBECONFIG}.
# See the comment block above for the assumed/optional environment variables.
function create-kubeconfig() {
  KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  SECONDARY_KUBECONFIG=${SECONDARY_KUBECONFIG:-}
  OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-}

  if [[ "$OVERRIDE_CONTEXT" != "" ]];then
      CONTEXT=$OVERRIDE_CONTEXT
  fi

  # KUBECONFIG determines the file we write to, but it may not exist yet
  # (KUBECONFIG may be a ':'-separated list; ensure each file exists).
  OLD_IFS=$IFS
  IFS=':'
  for cfg in ${KUBECONFIG} ; do
    if [[ ! -e "${cfg}" ]]; then
      mkdir -p "$(dirname "${cfg}")"
      touch "${cfg}"
    fi
  done
  IFS=$OLD_IFS

  local cluster_args=(
      "--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}"
  )
  if [[ -z "${CA_CERT:-}" ]]; then
    cluster_args+=("--insecure-skip-tls-verify=true")
  else
    cluster_args+=(
      "--certificate-authority=${CA_CERT}"
      "--embed-certs=true"
    )
  fi

  # Build credential args: bearer token wins over basic auth; client certs
  # may be added on top of either.
  local user_args=()
  if [[ ! -z "${KUBE_BEARER_TOKEN:-}" ]]; then
    user_args+=(
     "--token=${KUBE_BEARER_TOKEN}"
    )
  elif [[ ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then
    user_args+=(
     "--username=${KUBE_USER}"
     "--password=${KUBE_PASSWORD}"
    )
  fi
  if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
    user_args+=(
     "--client-certificate=${KUBE_CERT}"
     "--client-key=${KUBE_KEY}"
     "--embed-certs=true"
    )
  fi

  KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
  # Fix (SC2199): '[[ -n "${user_args[@]:-}" ]]' misbehaves once the array
  # holds more than one element; test the element count instead.
  if [[ ${#user_args[@]} -gt 0 ]]; then
    KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
  fi
  KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"

  if [[ "${SECONDARY_KUBECONFIG}" != "true" ]];then
    KUBECONFIG="${KUBECONFIG}" "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
  fi

  # If we have a bearer token, also create a credential entry with basic auth
  # so that it is easy to discover the basic auth password for your cluster
  # to use in a web browser.
  if [[ ! -z "${KUBE_BEARER_TOKEN:-}" && ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then
    KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}-basic-auth" "--username=${KUBE_USER}" "--password=${KUBE_PASSWORD}"
  fi

  echo "Wrote config for ${CONTEXT} to ${KUBECONFIG}"
}
# Clear kubeconfig data for a context
# Assumed vars:
# KUBECONFIG
# CONTEXT
#
# To explicitly name the context being removed, use OVERRIDE_CONTEXT
# Remove the cluster, user, and context entries for ${CONTEXT} (or
# ${OVERRIDE_CONTEXT}, when set) from ${KUBECONFIG}.
function clear-kubeconfig() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-}
  if [[ -n "${OVERRIDE_CONTEXT}" ]];then
      CONTEXT=${OVERRIDE_CONTEXT}
  fi

  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  # Unset the current-context before we delete it, as otherwise kubectl errors.
  local cc=$("${kubectl}" config view -o jsonpath='{.current-context}')
  if [[ "${cc}" == "${CONTEXT}" ]]; then
    "${kubectl}" config unset current-context
  fi

  # Drop every kubeconfig entry associated with this context.
  local entry
  for entry in "clusters.${CONTEXT}" "users.${CONTEXT}" "users.${CONTEXT}-basic-auth" "contexts.${CONTEXT}"; do
    "${kubectl}" config unset "${entry}"
  done

  echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}"
}
# Creates a kubeconfig file with the credentials for only the current-context
# cluster. This is used by federation to create secrets in test setup.
# Creates a kubeconfig file with the credentials for only the current-context
# cluster, under .../federation/kubernetes-apiserver/<context>. No-op unless
# FEDERATION=true.
function create-kubeconfig-for-federation() {
  if [[ "${FEDERATION:-}" == "true" ]]; then
    echo "creating kubeconfig for federation secret"
    local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
    local cc=$("${kubectl}" config view -o jsonpath='{.current-context}')
    # Fix (SC2086): quote the path so a KUBECONFIG containing spaces does not
    # word-split into multiple dirname arguments.
    KUBECONFIG_DIR=$(dirname "${KUBECONFIG:-$DEFAULT_KUBECONFIG}")
    KUBECONFIG_PATH="${KUBECONFIG_DIR}/federation/kubernetes-apiserver/${cc}"
    mkdir -p "${KUBECONFIG_PATH}"
    "${kubectl}" config view --minify --flatten > "${KUBECONFIG_PATH}/kubeconfig"
  fi
}
# Best-effort deletion of all workloads and services in the cluster; each
# delete is allowed to fail (|| true) so teardown always runs to completion.
# Order preserved: controllers first, then pods, services, and claims.
function tear_down_alive_resources() {
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local resource
  for resource in deployments rc pods svc pvc; do
    "${kubectl}" delete "${resource}" --all || true
  done
}
# Gets username, password for the current-context in kubeconfig, if they exist.
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
# KUBE_CONTEXT # if unset, defaults to current-context
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
#
# KUBE_USER,KUBE_PASSWORD will be empty if no current-context is set, or
# the current-context user does not exist or contain basicauth entries.
# Populate KUBE_USER / KUBE_PASSWORD from the kubeconfig entry for the chosen
# context (KUBE_CONTEXT when set, otherwise current-context). Both vars end up
# empty when the context or its basic-auth entries do not exist.
function get-kubeconfig-basicauth() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}

  local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
  if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
    cc="${KUBE_CONTEXT}"
  fi

  # Map the context to its user entry, then try that entry directly.
  local ctx_user=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.user}")
  get-kubeconfig-user-basicauth "${ctx_user}"

  if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
    # kube-up stores username/password in a an additional kubeconfig section
    # suffixed with "-basic-auth". Cloudproviders like GKE store in directly
    # in the top level section along with the other credential information.
    # TODO: Handle this uniformly, either get rid of "basic-auth" or
    # consolidate its usage into a function across scripts in cluster/
    get-kubeconfig-user-basicauth "${ctx_user}-basic-auth"
  fi
}
# Sets KUBE_USER and KUBE_PASSWORD to the username and password stored in
# the kubeconfig users entry named $1.
#
# Args:
#   $1 kubeconfig users entry to look in for basic auth
#      (e.g. "user" or "user-basic-auth").
# Assumed vars:
#   KUBE_ROOT
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-kubeconfig-user-basicauth() {
  local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  KUBE_USER=$("${kubectl}" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.username}")
  KUBE_PASSWORD=$("${kubectl}" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.password}")
}
# Generate a basic-auth credential pair.
#
# Vars set:
#   KUBE_USER      always "admin"
#   KUBE_PASSWORD  16 random alphanumeric characters (SystemRandom)
function gen-kube-basicauth() {
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
}
# Get the bearer token for the current-context in kubeconfig if one exists.
# Assumed vars:
#   KUBECONFIG    # if unset, defaults to global
#   KUBE_CONTEXT  # if unset, defaults to current-context
#
# Vars set:
#   KUBE_BEARER_TOKEN
#
# KUBE_BEARER_TOKEN will be empty if no current-context is set, or the
# current-context user does not exist or contain a bearer token entry.
function get-kubeconfig-bearertoken() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local context
  context=$("${kubectl}" config view -o jsonpath="{.current-context}")
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    context="${KUBE_CONTEXT}"
  fi
  local user
  user=$("${kubectl}" config view -o jsonpath="{.contexts[?(@.name == \"${context}\")].context.user}")
  KUBE_BEARER_TOKEN=$("${kubectl}" config view -o jsonpath="{.users[?(@.name == \"${user}\")].user.token}")
}
# Generate a random bearer token from /dev/urandom: base64-encode 128 random
# bytes, strip the characters "=+/" and keep the leading 32 bytes.
#
# Vars set:
#   KUBE_BEARER_TOKEN
function gen-kube-bearertoken() {
  KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
# Generate a short unique id.
# This only works on systems with python. It uses the time-based first field
# of a type-1 UUID rather than a full UUID because GCE has a name length
# limit.
#
# Vars set:
#   KUBE_UID
function gen-uid {
  KUBE_UID=$(python -c 'import uuid; print(uuid.uuid1().fields[0])')
}
# Populate KUBE_USER/KUBE_PASSWORD: read them from kubeconfig when a context
# was requested, generate fresh credentials when either is still missing,
# then validate both against a conservative character whitelist.
function load-or-gen-kube-basicauth() {
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    get-kubeconfig-basicauth
  fi
  if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
    gen-kube-basicauth
  fi
  # Make sure they don't contain any funny characters.
  if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
    echo "Bad KUBE_USER string."
    exit 1
  fi
  if ! [[ "${KUBE_PASSWORD}" =~ ^[-._@#%/a-zA-Z0-9]+$ ]]; then
    echo "Bad KUBE_PASSWORD string."
    exit 1
  fi
}
# Ensure KUBE_BEARER_TOKEN is populated: read it from kubeconfig when a
# context was requested; generate a fresh one when still absent.
function load-or-gen-kube-bearertoken() {
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    get-kubeconfig-bearertoken
  fi
  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
    gen-kube-bearertoken
  fi
}
# Get the master IP for the current-context in kubeconfig if one exists.
#
# Assumed vars:
#   KUBECONFIG    # if unset, defaults to global
#   KUBE_CONTEXT  # if unset, defaults to current-context
#
# Vars set:
#   KUBE_MASTER_URL
#
# KUBE_MASTER_URL will be empty if no current-context is set, or the
# current-context user does not exist or contain a server entry.
function detect-master-from-kubeconfig() {
  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local context
  context=$("${kubectl}" config view -o jsonpath="{.current-context}")
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    context="${KUBE_CONTEXT}"
  fi
  local cluster
  cluster=$("${kubectl}" config view -o jsonpath="{.contexts[?(@.name == \"${context}\")].context.cluster}")
  KUBE_MASTER_URL=$("${kubectl}" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
}
# Sets KUBE_VERSION to a proper version number (e.g. "v1.0.6",
# "v1.2.0-alpha.1.881+376438b69c7612"), or resolves a version publication of
# the form <path>/<version> (e.g. "release/stable", "ci/latest-1") by
# fetching the published version file from GCS.
#
# See the docs on getting builds for more information about version
# publication.
#
# Args:
#   $1 version string from command line
# Vars set:
#   KUBE_VERSION
function set_binary_version() {
  if [[ "${1}" =~ "/" ]]; then
    # -r keeps read from treating backslashes in the input as escapes.
    IFS='/' read -r -a path <<< "${1}"
    if [[ "${path[0]}" == "release" ]]; then
      KUBE_VERSION=$(gsutil cat "gs://kubernetes-release/${1}.txt")
    else
      KUBE_VERSION=$(gsutil cat "gs://kubernetes-release-dev/${1}.txt")
    fi
  else
    # A literal version string is used as-is.
    KUBE_VERSION=${1}
  fi
}
# Figure out which binaries to use on the server and ensure they are
# available. If KUBE_VERSION is specified use the published binaries it
# names, otherwise use local dev binaries.
#
# Assumed vars:
#   KUBE_VERSION
#   KUBE_RELEASE_VERSION_REGEX
#   KUBE_CI_VERSION_REGEX
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
#   SALT_TAR_URL
#   SALT_TAR_HASH
#   KUBE_MANIFESTS_TAR_URL
#   KUBE_MANIFESTS_TAR_HASH
function tars_from_version() {
  local sha1sum=""
  if which sha1sum >/dev/null 2>&1; then
    sha1sum="sha1sum"
  else
    # macOS ships shasum instead of sha1sum; this value is intentionally
    # expanded unquoted below so it word-splits into command + flag.
    sha1sum="shasum -a1"
  fi
  if [[ -z "${KUBE_VERSION-}" ]]; then
    find-release-tars
    upload-server-tars
  elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-salt.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    # Quote the URL: an unquoted expansion would be subject to word splitting
    # and glob expansion.
    KUBE_MANIFESTS_TAR_HASH=$(curl "${KUBE_MANIFESTS_TAR_URL}" --silent --show-error | ${sha1sum} | awk '{print $1}')
  elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-salt.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl "${KUBE_MANIFESTS_TAR_URL}" --silent --show-error | ${sha1sum} | awk '{print $1}')
  else
    echo "Version doesn't match regexp" >&2
    exit 1
  fi
  # Hash fetch failures are only warned about; missing tarballs are fatal.
  if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
    echo "Failure trying to curl release .sha1"
  fi
  if ! SALT_TAR_HASH=$(curl -Ss --fail "${SALT_TAR_URL}.sha1"); then
    echo "Failure trying to curl Salt tar .sha1"
  fi
  if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
    echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
    exit 1
  fi
  if ! curl -Ss --head "${SALT_TAR_URL}" >&/dev/null; then
    echo "Can't find Salt tar at ${SALT_TAR_URL}" >&2
    exit 1
  fi
}
# Search for the specified tarball in the various known output locations,
# echoing the location if found; exits non-zero when the tarball is missing.
#
# Assumed vars:
#   KUBE_ROOT
#
# Args:
#   $1 name of tarball to search for
function find-tar() {
  local -r tarball=$1
  # Keep the scratch variables function-local; previously they leaked into
  # the caller's (global) namespace.
  local locations location
  locations=(
    "${KUBE_ROOT}/server/${tarball}"
    "${KUBE_ROOT}/_output/release-tars/${tarball}"
    "${KUBE_ROOT}/bazel-bin/build/release-tars/${tarball}"
  )
  # Pick the most recently modified candidate; ls errors for missing paths
  # are discarded.
  location=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
  if [[ ! -f "${location}" ]]; then
    echo "!!! Cannot find ${tarball}" >&2
    exit 1
  fi
  echo "${location}"
}
# Verify and find the various tar files that we are going to use on the server.
#
# Assumed vars:
#   KUBE_ROOT
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
#   KUBE_MANIFESTS_TAR
function find-release-tars() {
  SERVER_BINARY_TAR=$(find-tar kubernetes-server-linux-amd64.tar.gz)
  SALT_TAR=$(find-tar kubernetes-salt.tar.gz)
  # The manifests tarball is only consumed by GCI, Trusty, Container Linux
  # and Ubuntu images; leave it empty for every other distribution.
  KUBE_MANIFESTS_TAR=
  local distro
  for distro in "${MASTER_OS_DISTRIBUTION:-}" "${NODE_OS_DISTRIBUTION:-}"; do
    case "${distro}" in
      trusty|gci|container-linux|ubuntu)
        KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz)
        break
        ;;
    esac
  done
}
# Discover the git version of the current build package.
#
# Assumed vars:
#   KUBE_ROOT
# Vars set:
#   KUBE_GIT_VERSION
function find-release-version() {
  KUBE_GIT_VERSION=""
  # Quote the paths so word splitting cannot corrupt the cat arguments.
  if [[ -f "${KUBE_ROOT}/version" ]]; then
    KUBE_GIT_VERSION="$(cat "${KUBE_ROOT}/version")"
  fi
  # The release-stage copy, when present, takes precedence.
  if [[ -f "${KUBE_ROOT}/_output/release-stage/full/kubernetes/version" ]]; then
    KUBE_GIT_VERSION="$(cat "${KUBE_ROOT}/_output/release-stage/full/kubernetes/version")"
  fi
  if [[ -z "${KUBE_GIT_VERSION}" ]]; then
    echo "!!! Cannot find release version"
    exit 1
  fi
}
# Load the docker-wrapped server images from the release tarball, retag them
# for KUBE_DOCKER_REGISTRY and push them, one background job per binary.
#
# Assumed vars:
#   KUBE_DOCKER_REGISTRY
# Vars set:
#   KUBE_IMAGE_TAG
function stage-images() {
  find-release-version
  find-release-tars
  # Docker tags may not contain '+'; map it to '-'. Parameter expansion
  # replaces the previous odd triple-quoted echo|sed pipeline.
  KUBE_IMAGE_TAG="${KUBE_GIT_VERSION//+/-}"
  local docker_wrapped_binaries=(
    "kube-apiserver"
    "kube-controller-manager"
    "kube-scheduler"
    "kube-proxy"
  )
  local docker_cmd=("docker")
  # gcr.io registries are pushed through gcloud for credential handling.
  if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/"* ]]; then
    local docker_push_cmd=("gcloud" "docker")
  else
    local docker_push_cmd=("${docker_cmd[@]}")
  fi
  local temp_dir="$(mktemp -d -t 'kube-server-XXXX')"
  tar xzfv "${SERVER_BINARY_TAR}" -C "${temp_dir}" &> /dev/null
  for binary in "${docker_wrapped_binaries[@]}"; do
    # Quote the path: an unquoted temp dir would be subject to word splitting.
    local docker_tag="$(cat "${temp_dir}/kubernetes/server/bin/${binary}.docker_tag")"
    (
      "${docker_cmd[@]}" load -i "${temp_dir}/kubernetes/server/bin/${binary}.tar"
      "${docker_cmd[@]}" rmi "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" 2>/dev/null || true
      "${docker_cmd[@]}" tag "gcr.io/google_containers/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}"
      "${docker_push_cmd[@]}" push "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}"
    ) &> "${temp_dir}/${binary}-push.log" &
  done
  kube::util::wait-for-jobs || {
    kube::log::error "unable to push images. See ${temp_dir}/*.log for more info."
    return 1
  }
  rm -rf "${temp_dir}"
  return 0
}
# Quote something appropriate for a yaml string: wrap in single quotes and
# double any embedded single quotes.
#
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
# "strip out quotes", and we really should be using a YAML library for
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
function yaml-quote {
  local joined escaped
  joined="${*:-}"
  escaped=$(echo "${joined}" | sed -e "s/'/''/g")
  echo "'${escaped}'"
}
# Builds the RUNTIME_CONFIG var from other feature enable options (such as
# features in alpha)
function build-runtime-config() {
  # Intentionally a no-op for now; kept as an extension point for callers.
  :
}
# Writes the cluster name into a temporary file (one line, newline
# terminated).
# Assumed vars
#   CLUSTER_NAME
#   KUBE_TEMP
function write-cluster-name {
  echo "${CLUSTER_NAME}" > "${KUBE_TEMP}/cluster-name.txt"
}
# Emits the master's kube-env and master-certs YAML files into KUBE_TEMP.
function write-master-env {
  # If the user requested that the master be part of the cluster, point the
  # master kubelet at itself (unless an apiserver was already chosen).
  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" && -z "${KUBELET_APISERVER:-}" ]]; then
    KUBELET_APISERVER="${MASTER_NAME}"
  fi
  # Default the advertised master name when the caller did not provide one.
  KUBERNETES_MASTER_NAME="${KUBERNETES_MASTER_NAME:-${MASTER_NAME}}"
  build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
  build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
}
# Emits the node kube-env YAML file into KUBE_TEMP.
function write-node-env {
  # Default the advertised master name when the caller did not provide one.
  KUBERNETES_MASTER_NAME="${KUBERNETES_MASTER_NAME:-${MASTER_NAME}}"
  build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
}
# Writes the master certificate/key material as a kube-env style YAML file.
#
# Args:
#   $1 path of the YAML file to (over)write
# Assumed vars (all optional, default to empty):
#   KUBEAPISERVER_CERT_BASE64, KUBEAPISERVER_KEY_BASE64, CA_KEY_BASE64,
#   AGGREGATOR_CA_KEY_BASE64, REQUESTHEADER_CA_CERT_BASE64,
#   PROXY_CLIENT_CERT_BASE64, PROXY_CLIENT_KEY_BASE64
function build-kube-master-certs {
  local file=$1
  # Quote ${file}: an unquoted expansion would word-split a path with spaces.
  rm -f "${file}"
  cat >"${file}" <<EOF
KUBEAPISERVER_CERT: $(yaml-quote ${KUBEAPISERVER_CERT_BASE64:-})
KUBEAPISERVER_KEY: $(yaml-quote ${KUBEAPISERVER_KEY_BASE64:-})
CA_KEY: $(yaml-quote ${CA_KEY_BASE64:-})
AGGREGATOR_CA_KEY: $(yaml-quote ${AGGREGATOR_CA_KEY_BASE64:-})
REQUESTHEADER_CA_CERT: $(yaml-quote ${REQUESTHEADER_CA_CERT_BASE64:-})
PROXY_CLIENT_CERT: $(yaml-quote ${PROXY_CLIENT_CERT_BASE64:-})
PROXY_CLIENT_KEY: $(yaml-quote ${PROXY_CLIENT_KEY_BASE64:-})
EOF
}
# Writes the kube-env YAML consumed by an instance at boot.
#
# $1: if 'true', we're building a master yaml, else a node
# $2: path of the output YAML file
#
# NOTE(review): $file is expanded unquoted throughout this function; that
# assumes the output path contains no whitespace — confirm callers guarantee
# this (they pass paths under KUBE_TEMP).
function build-kube-env {
local master=$1
local file=$2
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
local salt_tar_url=$SALT_TAR_URL
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
# Container Linux and Ubuntu take the (possibly comma-separated) URL lists
# through split_csv for the role being built.
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \
[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
# TODO: Support fallback .tar.gz settings on Container Linux
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
salt_tar_url=$(split_csv "${SALT_TAR_URL}")
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
fi
build-runtime-config
gen-uid
# Rewrite the file from scratch, starting with the env common to all roles.
rm -f ${file}
cat >$file <<EOF
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
NODE_TAGS: $(yaml-quote ${NODE_TAGS:-})
NODE_NETWORK: $(yaml-quote ${NETWORK:-})
NODE_SUBNETWORK: $(yaml-quote ${SUBNETWORK:-})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
PROJECT_ID: $(yaml-quote ${PROJECT})
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
SALT_TAR_URL: $(yaml-quote ${salt_tar_url})
SALT_TAR_HASH: $(yaml-quote ${SALT_TAR_HASH})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none})
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
ENABLE_CLUSTER_REGISTRY: $(yaml-quote ${ENABLE_CLUSTER_REGISTRY:-false})
CLUSTER_REGISTRY_DISK: $(yaml-quote ${CLUSTER_REGISTRY_DISK:-})
CLUSTER_REGISTRY_DISK_SIZE: $(yaml-quote ${CLUSTER_REGISTRY_DISK_SIZE:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
SOFTLOCKUP_PANIC: $(yaml-quote ${SOFTLOCKUP_PANIC:-})
OPENCONTRAIL_TAG: $(yaml-quote ${OPENCONTRAIL_TAG:-})
OPENCONTRAIL_KUBERNETES_TAG: $(yaml-quote ${OPENCONTRAIL_KUBERNETES_TAG:-})
OPENCONTRAIL_PUBLIC_SUBNET: $(yaml-quote ${OPENCONTRAIL_PUBLIC_SUBNET:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_IMAGE_TAG: $(yaml-quote ${KUBE_IMAGE_TAG:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
KUBE_UID: $(yaml-quote ${KUBE_UID:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
EOF
# Optional settings below are appended only when explicitly configured.
if [ -n "${KUBELET_PORT:-}" ]; then
cat >>$file <<EOF
KUBELET_PORT: $(yaml-quote ${KUBELET_PORT})
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat >>$file <<EOF
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
EOF
fi
# These distributions fetch the manifests from a separate tarball.
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
cat >>$file <<EOF
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
EOF
fi
if [ -n "${TEST_CLUSTER:-}" ]; then
cat >>$file <<EOF
TEST_CLUSTER: $(yaml-quote ${TEST_CLUSTER})
EOF
fi
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
KUBELET_TEST_ARGS: $(yaml-quote ${KUBELET_TEST_ARGS})
EOF
fi
if [ -n "${NODE_KUBELET_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
NODE_KUBELET_TEST_ARGS: $(yaml-quote ${NODE_KUBELET_TEST_ARGS})
EOF
fi
if [ -n "${MASTER_KUBELET_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
MASTER_KUBELET_TEST_ARGS: $(yaml-quote ${MASTER_KUBELET_TEST_ARGS})
EOF
fi
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
KUBELET_TEST_LOG_LEVEL: $(yaml-quote ${KUBELET_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
DOCKER_TEST_LOG_LEVEL: $(yaml-quote ${DOCKER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${DOCKER_LOG_DRIVER:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_DRIVER: $(yaml-quote ${DOCKER_LOG_DRIVER})
EOF
fi
if [ -n "${DOCKER_LOG_MAX_SIZE:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_MAX_SIZE: $(yaml-quote ${DOCKER_LOG_MAX_SIZE})
EOF
fi
if [ -n "${DOCKER_LOG_MAX_FILE:-}" ]; then
cat >>$file <<EOF
DOCKER_LOG_MAX_FILE: $(yaml-quote ${DOCKER_LOG_MAX_FILE})
EOF
fi
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
cat >>$file <<EOF
ENABLE_CUSTOM_METRICS: $(yaml-quote ${ENABLE_CUSTOM_METRICS})
EOF
fi
if [ -n "${ENABLE_METADATA_PROXY:-}" ]; then
cat >>$file <<EOF
ENABLE_METADATA_PROXY: $(yaml-quote ${ENABLE_METADATA_PROXY})
EOF
fi
if [ -n "${KUBE_FIREWALL_METADATA_SERVER:-}" ]; then
cat >>$file <<EOF
KUBE_FIREWALL_METADATA_SERVER: $(yaml-quote ${KUBE_FIREWALL_METADATA_SERVER})
EOF
fi
if [ -n "${FEATURE_GATES:-}" ]; then
cat >>$file <<EOF
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
EOF
fi
if [ -n "${ROTATE_CERTIFICATES:-}" ]; then
cat >>$file <<EOF
ROTATE_CERTIFICATES: $(yaml-quote ${ROTATE_CERTIFICATES})
EOF
fi
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] ||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
cat >>$file <<EOF
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec})
EOF
fi
# PROVIDER_VARS lists names of additional variables to forward verbatim.
# Each value passes through eval, so the list must come from trusted config.
if [ -n "${PROVIDER_VARS:-}" ]; then
local var_name
local var_value
for var_name in ${PROVIDER_VARS}; do
eval "local var_value=\$(yaml-quote \${${var_name}})"
cat >>$file <<EOF
${var_name}: ${var_value}
EOF
done
fi
if [[ "${master}" == "true" ]]; then
# Master-only env vars.
cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "true")
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
NUM_NODES: $(yaml-quote ${NUM_NODES})
STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-etcd3})
STORAGE_MEDIA_TYPE: $(yaml-quote ${STORAGE_MEDIA_TYPE:-})
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-})
ENABLE_LEGACY_ABAC: $(yaml-quote ${ENABLE_LEGACY_ABAC:-})
MASTER_ADVERTISE_ADDRESS: $(yaml-quote ${MASTER_ADVERTISE_ADDRESS:-})
ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
EOF
# KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
# flag
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
EOF
fi
# ETCD_IMAGE (if set) allows to use a custom etcd image.
if [ -n "${ETCD_IMAGE:-}" ]; then
cat >>$file <<EOF
ETCD_IMAGE: $(yaml-quote ${ETCD_IMAGE})
EOF
fi
# ETCD_VERSION (if set) allows you to use custom version of etcd.
# The main purpose of using it may be rollback of etcd v3 API,
# where we need 3.0.* image, but are rolling back to 2.3.7.
if [ -n "${ETCD_VERSION:-}" ]; then
cat >>$file <<EOF
ETCD_VERSION: $(yaml-quote ${ETCD_VERSION})
EOF
fi
if [ -n "${ETCD_HOSTNAME:-}" ]; then
cat >>$file <<EOF
ETCD_HOSTNAME: $(yaml-quote ${ETCD_HOSTNAME})
EOF
fi
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat >>$file <<EOF
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
fi
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
fi
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
EOF
fi
if [ -n "${APISERVER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
APISERVER_TEST_LOG_LEVEL: $(yaml-quote ${APISERVER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_ARGS: $(yaml-quote ${CONTROLLER_MANAGER_TEST_ARGS})
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_LOG_LEVEL: $(yaml-quote ${CONTROLLER_MANAGER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
SCHEDULER_TEST_ARGS: $(yaml-quote ${SCHEDULER_TEST_ARGS})
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
SCHEDULER_TEST_LOG_LEVEL: $(yaml-quote ${SCHEDULER_TEST_LOG_LEVEL})
EOF
fi
if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
EOF
fi
if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
EOF
fi
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
cat >>$file <<EOF
ETCD_QUORUM_READ: $(yaml-quote ${ETCD_QUORUM_READ})
EOF
fi
if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
cat >>$file <<EOF
CLUSTER_SIGNING_DURATION: $(yaml-quote ${CLUSTER_SIGNING_DURATION})
EOF
fi
else
# Node-only env vars.
cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "false")
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
EOF
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
KUBEPROXY_TEST_ARGS: $(yaml-quote ${KUBEPROXY_TEST_ARGS})
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat >>$file <<EOF
KUBEPROXY_TEST_LOG_LEVEL: $(yaml-quote ${KUBEPROXY_TEST_LOG_LEVEL})
EOF
fi
fi
if [ -n "${NODE_LABELS:-}" ]; then
cat >>$file <<EOF
NODE_LABELS: $(yaml-quote ${NODE_LABELS})
EOF
fi
if [ -n "${EVICTION_HARD:-}" ]; then
cat >>$file <<EOF
EVICTION_HARD: $(yaml-quote ${EVICTION_HARD})
EOF
fi
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]]; then
# Container-Linux-only env vars. TODO(yifan): Make them available on other distros.
cat >>$file <<EOF
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-rkt})
RKT_VERSION: $(yaml-quote ${RKT_VERSION:-})
RKT_PATH: $(yaml-quote ${RKT_PATH:-})
RKT_STAGE1_IMAGE: $(yaml-quote ${RKT_STAGE1_IMAGE:-})
EOF
fi
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
cat >>$file <<EOF
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
EOF
fi
# Federation specific environment variables.
if [[ -n "${FEDERATION:-}" ]]; then
cat >>$file <<EOF
FEDERATION: $(yaml-quote ${FEDERATION})
EOF
fi
if [ -n "${FEDERATION_NAME:-}" ]; then
cat >>$file <<EOF
FEDERATION_NAME: $(yaml-quote ${FEDERATION_NAME})
EOF
fi
if [ -n "${DNS_ZONE_NAME:-}" ]; then
cat >>$file <<EOF
DNS_ZONE_NAME: $(yaml-quote ${DNS_ZONE_NAME})
EOF
fi
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
cat >>$file <<EOF
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
EOF
fi
}
# Print the SHA1 digest of the file named by $1, preferring GNU sha1sum and
# falling back to BSD shasum (macOS).
function sha1sum-file() {
  local hasher
  if which sha1sum >/dev/null 2>&1; then
    hasher="sha1sum"
  else
    hasher="shasum -a1"
  fi
  # Unquoted ${hasher} word-splits "shasum -a1" into command + flag.
  ${hasher} "$1" | awk '{ print $1 }'
}
# Create certificate pairs for the cluster.
# $1: The public IP for the master.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
#  - ca (the cluster's certificate authority)
#  - server
#  - kubelet
#  - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#
# Vars set:
#   CERT_DIR
#   CA_CERT_BASE64
#   MASTER_CERT_BASE64
#   MASTER_KEY_BASE64
#   KUBELET_CERT_BASE64
#   KUBELET_KEY_BASE64
#   KUBECFG_CERT_BASE64
#   KUBECFG_KEY_BASE64
function create-certs {
  local -r primary_cn="${1}"
  # Determine extra certificate names for master: the first service IP
  # (network address of SERVICE_CLUSTER_IP_RANGE + 1) plus every IP argument.
  local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
  local sans=""
  # Quote "$@": plain $@ would word-split any argument containing whitespace.
  for extra in "$@"; do
    if [[ -n "${extra}" ]]; then
      sans="${sans}IP:${extra},"
    fi
  done
  sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
  echo "Generating certs for alternate-names: ${sans}"
  setup-easyrsa
  PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs
  AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs
  CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
  # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
  # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
  CA_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
  CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
  MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n')
  MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n')
  KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64 | tr -d '\r\n')
  KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64 | tr -d '\r\n')
  KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64 | tr -d '\r\n')
  KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64 | tr -d '\r\n')
  KUBEAPISERVER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kube-apiserver.crt" | base64 | tr -d '\r\n')
  KUBEAPISERVER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kube-apiserver.key" | base64 | tr -d '\r\n')
  # Setting up an addition directory (beyond pki) as it is the simplest way to
  # ensure we get a different CA pair to sign the proxy-client certs and which
  # we can send CA public key to the user-apiserver to validate communication.
  AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
  AGGREGATOR_CA_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
  REQUESTHEADER_CA_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
  PROXY_CLIENT_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | base64 | tr -d '\r\n')
  PROXY_CLIENT_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | base64 | tr -d '\r\n')
}
# Download easy-rsa and lay out working copies for the kubelet and aggregator
# CAs under KUBE_TEMP. All subshell output is captured to a debug file that
# is only surfaced on failure.
function setup-easyrsa {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${KUBE_TEMP}"
    curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
    tar xzf easy-rsa.tar.gz
    mkdir easy-rsa-master/kubelet
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
    mkdir easy-rsa-master/aggregator
    # Quote the redirect target: an unquoted path is subject to word splitting.
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>"${cert_create_debug_output}" || {
    # If there was an error in the subshell, just die.
    # TODO(roberthbailey): add better error handling here
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to setup easy-rsa: Aborting ===" >&2
    exit 2
  }
}
# Runs the easy RSA commands to generate certificate files.
# The generated files are at ${KUBE_TEMP}/easy-rsa-master/easyrsa3
#
# Assumed vars
# KUBE_TEMP
# MASTER_NAME
# PRIMARY_CN: Primary canonical name
# SANS: Subject alternate names
#
#
# Runs the easy RSA / cfssl commands to generate the master certificate files
# under ${KUBE_TEMP}/easy-rsa-master/easyrsa3. On failure, dumps the captured
# debug output and exits.
#
# Assumed vars: KUBE_TEMP, MASTER_NAME, PRIMARY_CN, SANS
function generate-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/easyrsa3"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass
    ./easyrsa build-client-full kube-apiserver nopass
    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"
    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the kubelet client cert with the correct groups
    echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet
    mv "kubelet-key.pem" "pki/private/kubelet.key"
    mv "kubelet.pem" "pki/issued/kubelet.crt"
    rm -f "kubelet.csr"
    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=kubecfg --req-org=system:masters \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || {
    # Quoted redirect target above (was unquoted): the mktemp path derives from
    # KUBE_TEMP and must survive word splitting/globbing.
    # If there was an error in the subshell, just die.
    # TODO(roberthbailey): add better error handling here
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate master certificates: Aborting ===" >&2
    exit 2
  }
}
# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are at ${KUBE_TEMP}/easy-rsa-master/aggregator
#
# Assumed vars
# KUBE_TEMP
# AGGREGATOR_MASTER_NAME
# AGGREGATOR_PRIMARY_CN: Primary canonical name
# AGGREGATOR_SANS: Subject alternate names
#
#
# Runs the easy RSA / cfssl commands to generate the aggregator certificate
# files under ${KUBE_TEMP}/easy-rsa-master/aggregator. On failure, dumps the
# captured debug output and exits.
#
# Assumed vars: KUBE_TEMP, AGGREGATOR_MASTER_NAME, AGGREGATOR_PRIMARY_CN, AGGREGATOR_SANS
function generate-aggregator-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/aggregator"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass
    ./easyrsa build-client-full aggregator-apiserver nopass
    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"
    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the aggregator client cert with the correct groups
    echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client
    mv "proxy-client-key.pem" "pki/private/proxy-client.key"
    mv "proxy-client.pem" "pki/issued/proxy-client.crt"
    rm -f "proxy-client.csr"
    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=proxy-clientcfg --req-org=system:aggregator \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || {
    # Quoted redirect target above (was unquoted): the mktemp path derives from
    # KUBE_TEMP and must survive word splitting/globbing.
    # If there was an error in the subshell, just die.
    # TODO(roberthbailey): add better error handling here
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate aggregator certificates: Aborting ===" >&2
    exit 2
  }
}
# Run the cfssl command to generate certificate files for the etcd service; the
# certificate files will be saved in the $1 directory.
#
# Optional vars:
# GEN_ETCD_CA_CERT (CA cert encode with base64 and ZIP compression)
# GEN_ETCD_CA_KEY (CA key encode with base64)
#
# If GEN_ETCD_CA_CERT or GEN_ETCD_CA_KEY is not specified, it will generate certs for the CA.
#
# Args:
# $1 (the directory that certificate files to save)
# $2 (the ip of etcd member)
# $3 (the type of etcd certificates, must be one of client, server, peer)
# $4 (the prefix of the certificate filename, default is $3)
# Generate etcd certificate files of the requested type into $1 using cfssl.
#
# Optional vars:
#   GEN_ETCD_CA_CERT (CA cert, base64-encoded gzip)
#   GEN_ETCD_CA_KEY  (CA key, base64-encoded)
# If not both provided, a fresh CA pair is generated.
#
# Args:
#   $1 output directory
#   $2 etcd member ip
#   $3 cert type: one of client, server, peer
#   $4 output filename prefix (defaults to $3)
function generate-etcd-cert() {
  local cert_dir=${1}
  local member_ip=${2}
  local type_cert=${3}
  local prefix=${4:-"${type_cert}"}
  local GEN_ETCD_CA_CERT=${GEN_ETCD_CA_CERT:-}
  local GEN_ETCD_CA_KEY=${GEN_ETCD_CA_KEY:-}
  mkdir -p "${cert_dir}"
  pushd "${cert_dir}"
  kube::util::ensure-cfssl .
  if [ ! -r "ca-config.json" ]; then
    cat >ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "43800h"
},
"profiles": {
"server": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
  fi
  if [ ! -r "ca-csr.json" ]; then
    cat >ca-csr.json <<EOF
{
"CN": "Kubernetes",
"key": {
"algo": "ecdsa",
"size": 256
},
"names": [
{
"C": "US",
"L": "CA",
"O": "kubernetes.io"
}
]
}
EOF
  fi
  # Reuse the caller-provided CA pair when both halves are supplied.
  # BUGFIX: this previously read the undefined variables ca_cert/ca_key, so a
  # provided CA was silently ignored and a new one generated below.
  if [[ -n "${GEN_ETCD_CA_CERT}" && -n "${GEN_ETCD_CA_KEY}" ]]; then
    echo "${GEN_ETCD_CA_CERT}" | base64 --decode | gunzip > ca.pem
    echo "${GEN_ETCD_CA_KEY}" | base64 --decode > ca-key.pem
  fi
  # Generate a fresh CA only if one is not already present/provided.
  if [[ ! -r "ca.pem" || ! -r "ca-key.pem" ]]; then
    ${CFSSL_BIN} gencert -initca ca-csr.json | ${CFSSLJSON_BIN} -bare ca -
  fi
  case "${type_cert}" in
    client)
      echo "Generate client certificates..."
      echo '{"CN":"client","hosts":["*"],"key":{"algo":"ecdsa","size":256}}' \
        | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client - \
        | ${CFSSLJSON_BIN} -bare "${prefix}"
      ;;
    server)
      echo "Generate server certificates..."
      echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \
        | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server -hostname="${member_ip},127.0.0.1" - \
        | ${CFSSLJSON_BIN} -bare "${prefix}"
      ;;
    peer)
      echo "Generate peer certificates..."
      echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \
        | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer -hostname="${member_ip},127.0.0.1" - \
        | ${CFSSLJSON_BIN} -bare "${prefix}"
      ;;
    *)
      echo "Unknown, unsupported etcd certs type: ${type_cert}" >&2
      echo "Supported type: client, server, peer" >&2
      exit 2
  esac
  popd
}
#
# Using provided master env, extracts value from provided key.
#
# Args:
# $1 master env (kube-env of master; result of calling get-master-env)
# $2 env key to use
# Using provided master env, extracts the value for the provided key.
#
# Args:
#   $1 master env (kube-env of master; result of calling get-master-env)
#   $2 env key to use
# Prints the value (text between the first pair of single quotes after the
# key), or an empty line if the key is absent.
function get-env-val() {
  local match=`(echo "${1}" | grep -E "^${2}:") || echo ""`
  if [[ -z ${match} ]]; then
    echo ""
    # BUGFIX: without this return, execution fell through to the echo below and
    # emitted a second empty line for missing keys.
    return
  fi
  echo ${match} | cut -d : -f 2 | cut -d \' -f 2
}
# Load the master env by calling get-master-env, and extract important values
function parse-master-env() {
# Get required master env vars
local master_env=$(get-master-env)
# Auth tokens for node components talking to the apiserver.
KUBELET_TOKEN=$(get-env-val "${master_env}" "KUBELET_TOKEN")
KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
# Cluster CA and per-component cert/key material (base64-encoded values).
CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")
CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY")
KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT")
KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY")
EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS")
KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT")
KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY")
MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT")
MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY")
# Aggregator CA + proxy-client pair used for apiserver aggregation.
AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY")
REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT")
PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT")
PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY")
ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC")
}
# Update or verify required gcloud components are installed
# at minimum required version.
# Assumed vars
# KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
# Use sudo when the gcloud install directory is not writable by this user.
local sudo_prefix=""
if [ ! -w $(dirname `which gcloud`) ]; then
sudo_prefix="sudo"
fi
# update and install components as needed
if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha
${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
${sudo_prefix} gcloud ${gcloud_prompt:-} components update
else
# Verify-only path: parse `gcloud version` JSON and fail if the SDK is older
# than 1.3.0 or the alpha/beta/core components are missing.
local version=$(gcloud version --format=json)
python -c'
import json,sys
from distutils import version
minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
  print("gcloud version out of date ( < %s )" % minVersion)
  exit(1)
missing = []
for c in required:
  if not data.get(c):
    missing += [c]
if missing:
  for c in missing:
    print ("missing required gcloud component \"{0}\"".format(c))
  exit(1)
' """${version}"""
fi
}
# Check whether required client and server binaries exist, prompting to download
# if missing.
# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries
# without prompting.
# Check whether required client and server binaries exist, prompting to
# download if missing. If KUBERNETES_SKIP_CONFIRM is set to y, binaries are
# downloaded automatically without prompting.
function verify-kube-binaries() {
  local missing_binaries=false
  # kubectl must at least be able to report its client version.
  if ! "${KUBE_ROOT}/cluster/kubectl.sh" version --client >&/dev/null; then
    echo "!!! kubectl appears to be broken or missing"
    missing_binaries=true
  fi
  # NOTE(review): $(...) runs the function's *output* as a command; presumably
  # find-release-tars prints nothing on success — confirm before changing.
  if ! $(find-release-tars); then
    missing_binaries=true
  fi
  if ! "${missing_binaries}"; then
    return
  fi
  get_binaries_script="${KUBE_ROOT}/cluster/get-kube-binaries.sh"
  local resp="y"
  if [[ ! "${KUBERNETES_SKIP_CONFIRM:-n}" =~ ^[yY]$ ]]; then
    echo "Required binaries appear to be missing. Do you wish to download them? [Y/n]"
    # -r: do not let backslashes in the reply act as escape characters.
    read -r resp
  fi
  if [[ "${resp}" =~ ^[nN]$ ]]; then
    echo "You must download binaries to continue. You can use "
    echo "  ${get_binaries_script}"
    # Fixed user-facing typo ("for your automatically").
    echo "to do this for you automatically."
    exit 1
  fi
  "${get_binaries_script}"
}
# Run pushd without stack output
function pushd() {
  # Quote "$@" so directory names containing spaces or glob characters survive
  # word splitting; suppress the directory-stack output.
  command pushd "$@" > /dev/null
}
# Run popd without stack output
function popd() {
  # Quote "$@" so any arguments (e.g. +N stack rotations) are passed verbatim;
  # suppress the directory-stack output.
  command popd "$@" > /dev/null
}
|
/**
 * In-memory book store: holds a list of books and supports substring search
 * over name and author.
 */
public class BookStoreApp {
    /** Backing store for every book registered with the app. */
    private ArrayList<Book> books;

    /** Creates an empty store. */
    public BookStoreApp() {
        this.books = new ArrayList<>();
    }

    /** Registers a book with the store. */
    public void addBook(Book book) {
        this.books.add(book);
    }

    /**
     * Returns every book whose name or author contains {@code query}
     * (case-sensitive substring match), in insertion order.
     */
    public ArrayList<Book> searchBooks(String query) {
        ArrayList<Book> matches = new ArrayList<>();
        for (int i = 0; i < books.size(); i++) {
            Book candidate = books.get(i);
            boolean nameHit = candidate.getName().contains(query);
            boolean authorHit = candidate.getAuthor().contains(query);
            if (nameHit || authorHit) {
                matches.add(candidate);
            }
        }
        return matches;
    }
}
|
<gh_stars>0
/* global ethers */
const { expect } = require('chai')
const { toFixedHex, poseidonHash2, randomBN } = require('../src/utils')
const MerkleTree = require('fixed-merkle-tree')
const controller = require('../src/index')
// Registers a note (deposit + withdrawal data) with the given SacredTrees-like
// contract, sending the transaction from the `from` signer.
async function register(note, sacredTrees, from) {
  const { instance, commitment, nullifierHash, depositBlock, withdrawalBlock } = note
  const asSigner = sacredTrees.connect(from)
  await asSigner.register(
    instance,
    toFixedHex(commitment),
    toFixedHex(nullifierHash),
    depositBlock,
    withdrawalBlock,
  )
}
// Depth of the MerkleTree instances built in the tests below.
const levels = 20
// Each batch update processes 2 ** CHUNK_TREE_HEIGHT events.
const CHUNK_TREE_HEIGHT = 8
// Dummy instance addresses cycled through when generating test notes.
const instances = [
'0x1111000000000000000000000000000000001111',
'0x2222000000000000000000000000000000002222',
'0x3333000000000000000000000000000000003333',
'0x4444000000000000000000000000000000004444',
]
// Dummy block numbers cycled through when generating test notes.
const blocks = ['0xaaaaaaaa', '0xbbbbbbbb', '0xcccccccc', '0xdddddddd']
// Integration tests for the SacredTrees contracts: batch deposit-tree updates
// and the registered-deposit/withdrawal views.
describe('SacredTrees', function () {
let tree
let operator
let sacredProxy
let verifier
let sacredTrees
let sacredTreesV1
let notes
let depositDataEventFilter
const depositEvents = []
const withdrawalEvents = []
// Fresh contracts plus 2 ** CHUNK_TREE_HEIGHT notes pre-registered on the V1
// mock before each test; depositEvents/withdrawalEvents mirror those notes.
beforeEach(async function () {
tree = new MerkleTree(levels, [], { hashFunction: poseidonHash2 })
;[operator, sacredProxy] = await ethers.getSigners()
const BatchTreeUpdateVerifier = await ethers.getContractFactory('BatchTreeUpdateVerifier')
verifier = await BatchTreeUpdateVerifier.deploy()
const SacredTreesV1 = await ethers.getContractFactory('SacredTreesV1Mock')
sacredTreesV1 = await SacredTreesV1.deploy(0, 0, tree.root(), tree.root())
notes = []
for (let i = 0; i < 2 ** CHUNK_TREE_HEIGHT; i++) {
notes[i] = {
instance: instances[i % instances.length],
depositBlock: blocks[i % blocks.length],
withdrawalBlock: 2 + i + i * 4 * 60 * 24,
commitment: randomBN(),
nullifierHash: randomBN(),
}
await register(notes[i], sacredTreesV1, sacredProxy)
depositEvents[i] = {
hash: toFixedHex(notes[i].commitment),
instance: toFixedHex(notes[i].instance, 20),
block: toFixedHex(notes[i].depositBlock, 4),
}
withdrawalEvents[i] = {
hash: toFixedHex(notes[i].nullifierHash),
instance: toFixedHex(notes[i].instance, 20),
block: toFixedHex(notes[i].withdrawalBlock, 4),
}
}
const SacredTrees = await ethers.getContractFactory('SacredTreesMock')
sacredTrees = await SacredTrees.deploy(operator.address, sacredTreesV1.address, {
depositsFrom: 1,
depositsStep: 1,
withdrawalsFrom: 2,
withdrawalsStep: 2,
})
await sacredTrees.initialize(sacredProxy.address, verifier.address)
depositDataEventFilter = sacredTrees.filters.DepositData()
})
describe('#updateDepositTree', () => {
// The contract-side hash must match args[0] computed off-chain.
it('should check hash', async () => {
const { args } = controller.batchTreeUpdate(tree, depositEvents)
const solHash = await sacredTrees.updateDepositTreeMock(...args.slice(1))
expect(solHash).to.be.equal(args[0])
})
it('should prove snark', async () => {
const { input, args } = controller.batchTreeUpdate(tree, depositEvents)
const proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await sacredTrees.updateDepositTree(proof, ...args)
const updatedRoot = await sacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
})
it('should work for non-empty tree', async () => {
let { input, args } = controller.batchTreeUpdate(tree, depositEvents)
let proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await sacredTrees.updateDepositTree(proof, ...args)
let updatedRoot = await sacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
//
for (let i = 0; i < notes.length; i++) {
await register(notes[i], sacredTrees, sacredProxy)
}
;({ input, args } = controller.batchTreeUpdate(tree, depositEvents))
proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await sacredTrees.updateDepositTree(proof, ...args)
updatedRoot = await sacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
})
it('should work with events from contracts', async () => {
let { input, args } = controller.batchTreeUpdate(tree, depositEvents)
let proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await sacredTrees.updateDepositTree(proof, ...args)
let updatedRoot = await sacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
const migratedEvents = await sacredTrees.queryFilter(depositDataEventFilter)
migratedEvents.forEach((e, i) => {
expect(e.args.index).to.be.equal(i)
})
//
for (let i = 0; i < notes.length; i++) {
await register(notes[i], sacredTrees, sacredProxy)
}
let registeredEvents = await sacredTrees.queryFilter(depositDataEventFilter)
registeredEvents = registeredEvents.map((e) => ({
hash: toFixedHex(e.args.hash),
instance: toFixedHex(e.args.instance, 20),
block: toFixedHex(e.args.block, 4),
}))
;({ input, args } = controller.batchTreeUpdate(tree, registeredEvents.slice(0, notes.length)))
proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await sacredTrees.updateDepositTree(proof, ...args)
updatedRoot = await sacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
})
it('should work for batch+N filled v1 tree', async () => {
const batchSize = 2 ** CHUNK_TREE_HEIGHT
for (let i = batchSize; i < batchSize + 2; i++) {
notes.push({
instance: instances[i % instances.length],
depositBlock: blocks[i % blocks.length],
withdrawalBlock: 2 + i + i * 4 * 60 * 24,
commitment: randomBN(),
nullifierHash: randomBN(),
})
await register(notes[i], sacredTreesV1, sacredProxy)
}
const SacredTrees = await ethers.getContractFactory('SacredTreesMock')
const newSacredTrees = await SacredTrees.deploy(operator.address, sacredTreesV1.address, {
depositsFrom: 1,
depositsStep: 1,
withdrawalsFrom: 2,
withdrawalsStep: 2,
})
await newSacredTrees.initialize(sacredProxy.address, verifier.address)
// load first batchSize deposits
let { input, args } = controller.batchTreeUpdate(tree, depositEvents)
let proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await newSacredTrees.updateDepositTree(proof, ...args)
let updatedRoot = await newSacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
// register 2 * `notes.length` new deposits on the new trees
for (let i = 0; i < notes.length; i++) {
await register(notes[i], newSacredTrees, sacredProxy)
}
for (let i = 0; i < notes.length; i++) {
await register(notes[i], newSacredTrees, sacredProxy)
}
// get 2 extra events from v1 trees
let events = notes.slice(batchSize).map((note) => ({
hash: toFixedHex(note.commitment),
instance: toFixedHex(note.instance, 20),
block: toFixedHex(note.depositBlock, 4),
}))
let registeredEvents = await newSacredTrees.queryFilter(depositDataEventFilter)
registeredEvents = registeredEvents.slice(batchSize) // cut processed deposits from v1
events = events.concat(
registeredEvents.slice(0, batchSize - 2).map((e) => ({
hash: toFixedHex(e.args.hash),
instance: toFixedHex(e.args.instance, 20),
block: toFixedHex(e.args.block, 4),
})),
)
//
;({ input, args } = controller.batchTreeUpdate(tree, events))
proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await newSacredTrees.updateDepositTree(proof, ...args)
updatedRoot = await newSacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
events = registeredEvents.slice(batchSize - 2, 2 * batchSize - 2).map((e) => ({
hash: toFixedHex(e.args.hash),
instance: toFixedHex(e.args.instance, 20),
block: toFixedHex(e.args.block, 4),
}))
;({ input, args } = controller.batchTreeUpdate(tree, events))
proof = await controller.prove(input, './artifacts/circuits/BatchTreeUpdate')
await newSacredTrees.updateDepositTree(proof, ...args)
updatedRoot = await newSacredTrees.depositRoot()
expect(updatedRoot).to.be.equal(tree.root())
})
// Pending specs (no callback): placeholders for unimplemented negative tests.
it('should reject for partially filled tree')
it('should reject for outdated deposit root')
it('should reject for incorrect insert index')
it('should reject for overflows of newRoot')
it('should reject for invalid sha256 args')
})
describe('#getRegisteredDeposits', () => {
it('should work', async () => {
for (let i = 0; i < 2 ** CHUNK_TREE_HEIGHT; i++) {
notes[i] = {
instance: instances[i % instances.length],
depositBlock: blocks[i % blocks.length],
withdrawalBlock: 2 + i + i * 4 * 60 * 24,
commitment: randomBN(),
nullifierHash: randomBN(),
}
await register(notes[i], sacredTrees, sacredProxy)
}
const abi = new ethers.utils.AbiCoder()
const count = await sacredTrees.depositsLength()
const _deposits = await sacredTrees.getRegisteredDeposits()
expect(count).to.be.equal(notes.length * 2)
// First notes.length slots are zeroed (processed); the rest hold the
// keccak256 of abi-encoded (instance, commitment, depositBlock).
_deposits.forEach((hash, i) => {
if (i < notes.length) {
expect(hash).to.be.equal('0x0000000000000000000000000000000000000000000000000000000000000000')
} else {
const index = i - notes.length
const encodedData = abi.encode(
['address', 'bytes32', 'uint256'],
[notes[index].instance, toFixedHex(notes[index].commitment), notes[index].depositBlock],
)
const leaf = ethers.utils.keccak256(encodedData)
expect(leaf).to.be.equal(hash)
}
})
// res.length.should.be.equal(1)
// res[0].should.be.true
// await sacredTrees.updateRoots([note1DepositLeaf], [])
// res = await sacredTrees.getRegisteredDeposits()
// res.length.should.be.equal(0)
// await registerDeposit(note2, sacredTrees)
// res = await sacredTrees.getRegisteredDeposits()
// // res[0].should.be.true
})
})
describe('#getRegisteredWithdrawals', () => {
it('should work', async () => {
for (let i = 0; i < 2 ** CHUNK_TREE_HEIGHT; i++) {
notes[i] = {
instance: instances[i % instances.length],
depositBlock: blocks[i % blocks.length],
withdrawalBlock: 2 + i + i * 4 * 60 * 24,
commitment: randomBN(),
nullifierHash: randomBN(),
}
await register(notes[i], sacredTrees, sacredProxy)
}
const abi = new ethers.utils.AbiCoder()
const count = await sacredTrees.withdrawalsLength()
const _withdrawals = await sacredTrees.getRegisteredWithdrawals()
expect(count).to.be.equal(notes.length * 2)
// Same layout as deposits, but leaves encode (instance, nullifierHash,
// withdrawalBlock).
_withdrawals.forEach((hash, i) => {
if (i < notes.length) {
expect(hash).to.be.equal('0x0000000000000000000000000000000000000000000000000000000000000000')
} else {
const index = i - notes.length
const encodedData = abi.encode(
['address', 'bytes32', 'uint256'],
[notes[index].instance, toFixedHex(notes[index].nullifierHash), notes[index].withdrawalBlock],
)
const leaf = ethers.utils.keccak256(encodedData)
expect(leaf).to.be.equal(hash)
}
})
})
})
})
|
TERMUX_PKG_HOMEPAGE=https://www.tug.org/texlive/
TERMUX_PKG_DESCRIPTION="TeX Live is a distribution of the TeX typesetting system."
TERMUX_PKG_LICENSE="GPL-2.0"
TERMUX_PKG_MAINTAINER="Henrik Grimler @Grimler91"
TERMUX_PKG_VERSION=20190410
TERMUX_PKG_REVISION=1
# ${TERMUX_PKG_VERSION:0:4} extracts the year ("2019") for the historic/ path.
TERMUX_PKG_SRCURL=ftp://ftp.tug.org/texlive/historic/${TERMUX_PKG_VERSION:0:4}/texlive-${TERMUX_PKG_VERSION}-texmf.tar.xz
TERMUX_PKG_SHA256=c2ec974abc98b91995969e7871a0b56dbc80dd8508113ffcff6923e912c4c402
TERMUX_PKG_DEPENDS="perl, texlive-bin (>= 20190410)"
TERMUX_PKG_CONFLICTS="texlive (<< 20170524-5), texlive-bin (<< 20190410), texlive-tlmgr (<< 20190410)"
TERMUX_PKG_REPLACES="texlive-bin (<< 20190410), texlive-tlmgr (<< 20190410)"
TERMUX_PKG_RECOMMENDS="texlive-tlmgr"
TERMUX_PKG_PLATFORM_INDEPENDENT=yes
TERMUX_PKG_HAS_DEBUG=no
TERMUX_PKG_BUILD_IN_SRC=yes
# Install locations: root of the TeX Live tree and the prefix's bin directory.
TL_ROOT=$TERMUX_PREFIX/share/texlive
TL_BINDIR=$TERMUX_PREFIX/bin
# Fetch the TeX Live installer and the full tlpdb package database (both
# checksum-verified by termux_download) and stage the pieces needed later.
termux_step_post_extract_package() {
cd $TERMUX_PKG_CACHEDIR
termux_download ftp://ftp.tug.org/texlive/historic/${TERMUX_PKG_VERSION:0:4}/install-tl-unx.tar.gz \
install-tl-unx.tar.gz \
44aa41b5783e345b7021387f19ac9637ff1ce5406a59754230c666642dfe7750
tar -xf install-tl-unx.tar.gz
# Keep only the installer files we need alongside the extracted texmf sources.
mv install-tl-*/install-tl \
install-tl-*/LICENSE.CTAN \
install-tl-*/LICENSE.TL \
install-tl-*/release-texlive.txt \
install-tl-*/tlpkg \
$TERMUX_PKG_SRCDIR/
# Download texlive.tlpdb, parse to get file lists and include in texlive-full.
termux_download ftp://ftp.tug.org/texlive/historic/${TERMUX_PKG_VERSION:0:4}/texlive-${TERMUX_PKG_VERSION}-tlpdb-full.tar.gz \
texlive-${TERMUX_PKG_VERSION}-tlpdb-full.tar.gz \
4c93a5c7d28df63c6dd7f767822e5dacf9290a0dff4990663e283b6e2d8d1918
tar xf texlive-${TERMUX_PKG_VERSION}-tlpdb-full.tar.gz
mv texlive.tlpdb $TERMUX_PKG_TMPDIR
}
# Install the texmf tree under $TL_ROOT and ship the adjusted tlpdb with it.
termux_step_make() {
# Rewrite relocatable " RELOC/" package paths to the texmf-dist layout.
sed -i "s% RELOC/% texmf-dist/%g" $TERMUX_PKG_TMPDIR/texlive.tlpdb
mkdir -p $TL_ROOT
cp -r $TERMUX_PKG_BUILDDIR/* $TL_ROOT/
# Build the ls-R filename database for the installed texmf-dist tree.
perl -I$TL_ROOT/tlpkg/ $TL_ROOT/texmf-dist/scripts/texlive/mktexlsr.pl $TL_ROOT/texmf-dist
mkdir -p $TL_ROOT/tlpkg
cp $TERMUX_PKG_TMPDIR/texlive.tlpdb $TL_ROOT/tlpkg/
}
# Generate the package maintainer scripts. Note the quoting: double quotes
# expand $TERMUX_PREFIX/$TL_ROOT at build time; single quotes keep $1 literal
# so it is evaluated when the script runs on the device.
termux_step_create_debscripts() {
# postinst: rebuild the filename database, create format links, greet user.
echo "#!$TERMUX_PREFIX/bin/bash" > postinst
echo "mktexlsr $TL_ROOT/texmf-var" >> postinst
echo "texlinks" >> postinst
echo "echo ''" >> postinst
echo "echo Welcome to TeX Live!" >> postinst
echo "echo ''" >> postinst
echo "echo 'TeX Live is a joint project of the TeX user groups around the world;'" >> postinst
echo "echo 'please consider supporting it by joining the group best for you.'" >> postinst
echo "echo 'The list of groups is available on the web at http://tug.org/usergroups.html.'" >> postinst
echo "exit 0" >> postinst
chmod 0755 postinst
# Remove all files installed through tlmgr on removal
echo "#!$TERMUX_PREFIX/bin/bash" > prerm
echo 'if [ $1 != "remove" ]; then exit 0; fi' >> prerm
echo "echo Running texlinks --unlink" >> prerm
echo "texlinks --unlink" >> prerm
echo "echo Removing texmf-dist" >> prerm
echo "rm -rf $TL_ROOT/texmf-dist" >> prerm
echo "echo Removing texmf-var and tlpkg" >> prerm
echo "rm -rf $TL_ROOT/{texmf-var,tlpkg/{texlive.tlpdb.*,tlpobj,backups}}" >> prerm
echo "exit 0" >> prerm
chmod 0755 prerm
}
# Files to delete after installation: the standalone installer, Windows-only
# scripts, the tlmgr GUI pieces, and the bulky doc/source trees.
TERMUX_PKG_RM_AFTER_INSTALL="
share/texlive/install-tl
share/texlive/texmf-dist/scripts/texlive/uninstall-win32.pl
share/texlive/texmf-dist/scripts/texlive/uninstq.vbs
share/texlive/texmf-dist/scripts/texlive/tlmgr.pl
share/texlive/texmf-dist/scripts/texlive/tlmgrgui.pl
share/texlive/tlpkg/gpg
share/texlive/tlpkg/installer
share/texlive/tlpkg/tltcl
share/texlive/tlpkg/translations
share/texlive/texmf-dist/doc
share/texlive/texmf-dist/source
"
# Here are all the files in collection-wintools: (single quotes due to share/texlive/tlpkg/dviout/UTILITY/dvi$pdf.bat)
TERMUX_PKG_RM_AFTER_INSTALL+='
share/texlive/tlpkg/dviout/GRAPHIC/PDL/pdldoc.tex
share/texlive/tlpkg/dviout/DOC/cmode1.png
share/texlive/texmf-dist/doc/support/wintools/pdfseparate.pdf
share/texlive/texmf-dist/doc/support/wintools/fc-query.pdf
share/texlive/texmf-dist/doc/support/tlaunch/figures/tlaunch_rug.png
share/texlive/tlpkg/dviout/GRAPHIC/TPIC/tpicdoc.tex
share/texlive/tlpkg/dviout/dviout.cnt
share/texlive/tlpkg/dviout/par/p4to1e.pgm
share/texlive/tlpkg/dviout/DOC/niko.bmp
share/texlive/tlpkg/dviout/GRAPHIC/COLOR/color.tex
share/texlive/bin/win32/luajitlatex.exe
share/texlive/tlpkg/dviout/DOC/dviouttipse.html
share/texlive/tlpkg/dviout/SAMPLE/slisamp2.tex
share/texlive/tlpkg/dviout/DOC/cmode2.png
share/texlive/tlpkg/dviout/DOC/seru.bmp
share/texlive/bin/win32/type1afm.exe
share/texlive/tlpkg/dviout/bmc.exe
share/texlive/tlpkg/dviout/GRAPHIC/PS/sample1.ps
share/texlive/tlpkg/dviout/par/HG-GyouSho.par
share/texlive/tlpkg/dviout/DOC/cmode.html
share/texlive/texmf-dist/doc/support/wintools/pdftoppm.pdf
share/texlive/texmf-dist/doc/support/wintools/fc-pattern.pdf
share/texlive/tlpkg/dviout/GRAPHIC/PS/starbrst.ps
share/texlive/texmf-dist/doc/support/wintools/pdfdetach.pdf
share/texlive/bin/win32/png22pnm.exe
share/texlive/tlpkg/dviout/GRAPHIC/PDL/lasersys.lp3
share/texlive/tlpkg/dviout/DOC/cmode6.png
share/texlive/tlpkg/dviout/CreateBB.exe
share/texlive/tlpkg/dviout/par/p4to1.pgm
share/texlive/texmf-dist/scripts/tlaunch/tlaunchmode.pl
share/texlive/texmf-dist/doc/support/tlaunch/figures/tlaunch_window.png
share/texlive/tlpkg/dviout/FONT/winttf.zip
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/dviout.dtx
share/texlive/tlpkg/dviout/GRAPHIC/PBM/pbmf.sty
share/texlive/texmf-dist/doc/support/wintools/zip.pdf
share/texlive/tlpkg/dviout/convedit.exe
share/texlive/tlpkg/dviout/par/TTfont.par
share/texlive/tlpkg/dviout/SAMPLE/slisamp4.tex
share/texlive/bin/win32/tiff2png.exe
share/texlive/tlpkg/dviout/DOC/lminus.bmp
share/texlive/bin/win32/pdftotext.exe
share/texlive/tlpkg/dviout/SPECIAL/demo.tex
share/texlive/tlpkg/dviout/SPECIAL/src.mac
share/texlive/tlpkg/dviout/par/dvipskdl.par
share/texlive/tlpkg/dviout/par/p12wait.pgm
share/texlive/bin/win32/tif22pnm.exe
share/texlive/tlpkg/dviout/par/Hidemaru.par
share/texlive/tlpkg/dviout/UTILITY/test_b5.tex
share/texlive/tlpkg/dviout/par/DF-GyouSho.par
share/texlive/bin/win32/gzip.exe
share/texlive/bin/win32/tomac.exe
share/texlive/texmf-dist/doc/support/wintools/pdftohtml.pdf
share/texlive/tlpkg/dviout/par/PKfont.par
share/texlive/tlpkg/dviout/GRAPHIC/PDL/file241b.p98
share/texlive/texmf-dist/doc/support/wintools/fc-cache.pdf
share/texlive/tlpkg/dviout/DOC/fpage.bmp
share/texlive/tlpkg/dviout/par/p4to1e0.pgm
share/texlive/texmf-dist/doc/support/wintools/fc-match.pdf
share/texlive/bin/win32/tlaunch.exe
share/texlive/tlpkg/dviout/DOC/dviouttips.html
share/texlive/tlpkg/dviout/UTILITY/test_b5e.tex
share/texlive/texmf-dist/doc/support/wintools/pdfimages.pdf
share/texlive/tlpkg/dviout/GRAPHIC/TPIC/linetest.tex
share/texlive/tlpkg/dviout/UTILITY/dvioute.vfn
share/texlive/tlpkg/dviout/DOC/dvioutQA.html
share/texlive/bin/win32/bitmap2eps.exe
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/dviout.ins
share/texlive/texmf-dist/doc/support/wintools/fc-scan.pdf
share/texlive/tlpkg/dviout/UTILITY/template
share/texlive/bin/win32/pdffonts.exe
share/texlive/tlpkg/dviout/par/p4n0.pgm
share/texlive/tlpkg/dviout/install.par
share/texlive/texmf-dist/scripts/bitmap2eps/bitmap2eps.vbs
share/texlive/texmf-dist/doc/support/wintools/pdftops.pdf
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/color.cfg
share/texlive/tlpkg/dviout/map/morisawa.map
share/texlive/tlpkg/dviout/GRAPHIC/PDL/spec.lp3
share/texlive/tlpkg/dviout/SAMPLE/slisampl.tex
share/texlive/bin/win32/bmeps.exe
share/texlive/tlpkg/dviout/DOC/tex_instchk.html
share/texlive/tlpkg/dviout/GRAPHIC/PS/gssub.exe
share/texlive/tlpkg/dviout/SPECIAL/srctex.cfg
share/texlive/texmf-dist/doc/support/wintools/pdftocairo.pdf
share/texlive/tlpkg/dviout/DOC/hyper.bmp
share/texlive/tlpkg/dviout/par/HG-KaiSho-PRO.par
share/texlive/tlpkg/dviout/DOC/present.html
share/texlive/tlpkg/dviout/GRAPHIC/PS/epsfdoc.tex
share/texlive/tlpkg/dviout/DOC/testtex.bat
share/texlive/tlpkg/dviout/par/p4to10.pgm
share/texlive/texmf-dist/web2c/tlaunch.ini
share/texlive/tlpkg/dviout/GRAPHIC/bmc/bmc.txt
share/texlive/tlpkg/dviout/SAMPLE/sample.txt
share/texlive/texmf-dist/doc/support/tlaunch/Changes
share/texlive/bin/win32/pdfseparate.exe
share/texlive/bin/win32/pdfimages.exe
share/texlive/tlpkg/dviout/readme.txt
share/texlive/bin/win32/pdfsig.exe
share/texlive/tlpkg/dviout/HYPERTEX/input9.tex
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/readme
share/texlive/tlpkg/dviout/DOC/mspmin.png
share/texlive/tlpkg/dviout/GRAPHIC/PDL/picbox.tex
share/texlive/tlpkg/dviout/history.txt
share/texlive/tlpkg/dviout/DOC/kappa.bmp
share/texlive/tlpkg/dviout/UTILITY/template.pks
share/texlive/tlpkg/dviout/SPECIAL/presen.sty
share/texlive/tlpkg/dviout/rawprt.exe
share/texlive/bin/win32/pdftops.exe
share/texlive/tlpkg/dviout/GRAPHIC/bmc/createbb.pdf
share/texlive/tlpkg/dviout/map/gtfonts.map
share/texlive/texmf-dist/doc/support/tlaunch/README
share/texlive/tlpkg/dviout/DOC/le.bmp
share/texlive/tlpkg/dviout/CFG/prtsrc.zip
share/texlive/tlpkg/dviout/00readme.txt
share/texlive/bin/win32/todos.exe
share/texlive/tlpkg/dviout/GRAPHIC/bmc/ifbmc.spi
share/texlive/tlpkg/dviout/UTILITY/test_org.tex
share/texlive/tlpkg/dviout/UTILITY/dviout1.vfn
share/texlive/tlpkg/dviout/map/mojikyo.map
share/texlive/bin/win32/aftopl.exe
share/texlive/bin/win32/png2bmp.exe
share/texlive/bin/win32/unzip.exe
share/texlive/tlpkg/dviout/dvispce.txt
share/texlive/tlpkg/dviout/par/WinShell.par
share/texlive/tlpkg/dviout/HYPERTEX/hyperdvi.tex
share/texlive/bin/win32/zip.exe
share/texlive/tlpkg/dviout/GRAPHIC/PDL/lips3.gpd
share/texlive/tlpkg/dviout/GRAPHIC/PS/pssample.tex
share/texlive/tlpkg/dviout/UTILITY/template.pk0
share/texlive/tlpkg/dviout/par/dvicut.par
share/texlive/tlpkg/dviout/par/dvipdfmr.par
share/texlive/tlpkg/dviout/propw.exe
share/texlive/bin/win32/djpeg.exe
share/texlive/texmf-dist/doc/support/wintools/fc-list.pdf
share/texlive/tlpkg/dviout/par/EJ-Embed.par
share/texlive/tlpkg/dviout/par/J-Embed.par
share/texlive/tlpkg/dviout/install.txt
share/texlive/tlpkg/dviout/par/fontpath.par
share/texlive/tlpkg/dviout/FONT/exjfonts.zip
share/texlive/tlpkg/dviout/SPECIAL/srcspecial.mac
share/texlive/tlpkg/dviout/par/E-noTT.par
share/texlive/tlpkg/dviout/DOC/dvi2.bmp
share/texlive/tlpkg/dviout/CFG/prtcfg.zip
share/texlive/tlpkg/dviout/dvispc.txt
share/texlive/texmf-dist/doc/support/wintools/pdfinfo.pdf
share/texlive/bin/win32/dviout.exe
share/texlive/tlpkg/dviout/map/ttfexp.map
share/texlive/bin/win32/gunzip.exe
share/texlive/tlpkg/dviout/par/dvipdfmxv.par
share/texlive/tlpkg/dviout/DOC/bpage.bmp
share/texlive/tlpkg/dviout/DOC/lplus.bmp
share/texlive/tlpkg/dviout/HYPERTEX/input8.tex
share/texlive/texmf-dist/doc/support/wintools/unzip.pdf
share/texlive/tlpkg/dviout/DOC/tex_dvioutw.html
share/texlive/bin/win32/tlaunchmode.exe
share/texlive/tlpkg/dviout/FONT/ReadMe.txt
share/texlive/tlpkg/dviout/GRAPHIC/PS/sample2.ps
share/texlive/tlpkg/dviout/GRAPHIC/bmc/exbmc.xpi
share/texlive/tlpkg/dviout/par/Macro0.par
share/texlive/tlpkg/dviout/par/hiragino.par
share/texlive/tlpkg/dviout/par/default.par
share/texlive/bin/win32/pdfunite.exe
share/texlive/tlpkg/dviout/par/E-TT.par
share/texlive/tlpkg/dviout/CFG/newcfg.txt
share/texlive/tlpkg/dviout/par/p4to1o0.pgm
share/texlive/tlpkg/dviout/optcfg.exe
share/texlive/tlpkg/dviout/DOC/hung.png
share/texlive/tlpkg/dviout/srctex.exe
share/texlive/tlpkg/dviout/DOC/option.bmp
share/texlive/tlpkg/dviout/dviadd.exe
share/texlive/texmf-dist/doc/support/tlaunch/tlaunch.pdf
share/texlive/tlpkg/dviout/par/jvar.par
share/texlive/tlpkg/dviout/par/wintex.par
share/texlive/tlpkg/dviout/par/bakoma.par
share/texlive/tlpkg/dviout/GRAPHIC/PS/sample3.ps
share/texlive/tlpkg/dviout/HYPERTEX/inputxy.tex
share/texlive/tlpkg/dviout/SPECIAL/dviout.sty
share/texlive/tlpkg/dviout/etfdump.exe
share/texlive/texmf-dist/doc/support/tlaunch/figures/custom_ed.png
share/texlive/tlpkg/dviout/par/E-Embed.par
share/texlive/tlpkg/dviout/par/dvispcat.par
share/texlive/bin/win32/bmp2png.exe
share/texlive/tlpkg/dviout/DOC/search.bmp
share/texlive/tlpkg/dviout/GRAPHIC/PS/sample0.ps
share/texlive/tlpkg/dviout/SAMPLE/slisamp3.tex
share/texlive/tlpkg/dviout/dvioute.chm
share/texlive/tlpkg/dviout/map/pttfonts.map
share/texlive/tlpkg/dviout/GRAPHIC/TPIC/tpic_ext.doc
share/texlive/tlpkg/dviout/par/WinJFont.par
share/texlive/tlpkg/dviout/DOC/newmin.png
share/texlive/tlpkg/dviout/par/p4to1v.pgm
share/texlive/tlpkg/dviout/par/dvipskdis.par
share/texlive/tlpkg/dviout/dviout.chm
share/texlive/tlpkg/dviout/dvioute.cnt
share/texlive/tlpkg/dviout/map/japanese.map
share/texlive/tlpkg/dviout/GRAPHIC/TPIC/rtexampl.tex
share/texlive/tlpkg/dviout/DOC/dvi.html
share/texlive/tlpkg/dviout/HYPERTEX/myhyper.sty
share/texlive/tlpkg/dviout/dviout.exe
share/texlive/tlpkg/dviout/rawprt.txt
share/texlive/texmf-dist/doc/support/wintools/pdfunite.pdf
share/texlive/tlpkg/dviout/GRAPHIC/PDL/lasersys.tex
share/texlive/bin/win32/jbig2.exe
share/texlive/tlpkg/dviout/PTEX/naochan!.tex
share/texlive/tlpkg/dviout/par/dvipskpdf.par
share/texlive/texmf-dist/doc/support/tlaunch/rug.zip
share/texlive/tlpkg/dviout/par/p24wait.pgm
share/texlive/bin/win32/cjpeg.exe
share/texlive/tlpkg/dviout/par/p4to1o.pgm
share/texlive/tlpkg/dviout/par/dvipsk.par
share/texlive/texmf-dist/doc/support/tlaunch/COPYING
share/texlive/tlpkg/dviout/DOC/print.bmp
share/texlive/tlpkg/dviout/par/p4n.pgm
share/texlive/tlpkg/dviout/par/dvipdfm.par
share/texlive/tlpkg/dviout/par/quitpresen.par
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/graphics.cfg
share/texlive/texmf-dist/doc/support/wintools/fc-validate.pdf
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/readme.eng
share/texlive/tlpkg/dviout/GRAPHIC/bmc/ifbmc.txt
share/texlive/bin/win32/tounix.exe
share/texlive/tlpkg/dviout/GRAPHIC/PDL/spec.tex
share/texlive/tlpkg/dviout/HYPERTEX/input7.tex
share/texlive/texmf-dist/doc/support/wintools/pdffonts.pdf
share/texlive/tlpkg/dviout/DOC/serd.bmp
share/texlive/texmf-dist/doc/support/wintools/fc-cat.pdf
share/texlive/tlpkg/dviout/HYPERTEX/keyin.sty
share/texlive/tlpkg/dviout/chkfont.txt
share/texlive/tlpkg/dviout/map/ttfonts.map
share/texlive/bin/win32/pdftoppm.exe
share/texlive/tlpkg/dviout/GRAPHIC/LATEX2E/dviout.def
share/texlive/tlpkg/dviout/par/quit.par
share/texlive/tlpkg/dviout/gen_pk
share/texlive/tlpkg/dviout/files.txt
share/texlive/tlpkg/dviout/dvispc.exe
share/texlive/bin/win32/pdfinfo.exe
share/texlive/tlpkg/dviout/par/dvispcal.par
share/texlive/tlpkg/dviout/par/texhelp.par
share/texlive/texmf-dist/doc/support/wintools/pdftotext.pdf
share/texlive/texmf-dist/doc/support/wintools/gzip.pdf
share/texlive/texmf-dist/doc/support/wintools/pdfsig.pdf
share/texlive/tlpkg/dviout/HYPERTEX/hyper2.tex
share/texlive/bin/win32/pdftocairo.exe
share/texlive/tlpkg/dviout/SAMPLE/sample.tex
share/texlive/tlpkg/dviout/par/dvipskprn.par
share/texlive/tlpkg/dviout/UTILITY/dviout0.par
share/texlive/tlpkg/dviout/SPECIAL/ophook.sty
share/texlive/tlpkg/dviout/par/dvispcap.par
share/texlive/tlpkg/dviout/SPECIAL/presen.txt
share/texlive/bin/win32/pdfdetach.exe
share/texlive/tlpkg/dviout/par/presen.par
share/texlive/tlpkg/dviout/par/dvispcs.par
share/texlive/tlpkg/dviout/UTILITY/test_a4.tex
share/texlive/tlpkg/dviout/DOC/cmode3.png
share/texlive/tlpkg/dviout/par/dvipskeps.par
share/texlive/tlpkg/dviout/HYPERTEX/input.tex
share/texlive/tlpkg/dviout/chkfont.exe
share/texlive/tlpkg/dviout/ttindex.exe
share/texlive/tlpkg/dviout/GRAPHIC/PBM/pbmf.doc
share/texlive/tlpkg/dviout/UTILITY/null.vfn
share/texlive/texmf-dist/doc/support/tlaunch/tlaunch.tex
share/texlive/bin/win32/pdftohtml.exe
share/texlive/tlpkg/dviout/par/dvipdfms.par
share/texlive/tlpkg/dviout/UTILITY/dvi$pdf.bat
share/texlive/bin/win32/texview.exe
share/texlive/tlpkg/dviout/UTILITY/dviout0.vfn
share/texlive/bin/win32/sam2p.exe
share/texlive/tlpkg/dviout/propw0.txt'
|
#!/usr/bin/env bats
load test_helper
@test "Ensure /bin/fusermount hasn't SUID/GUID set" {
run gotSGid /bin/fusermount
[ "$status" -eq 1 ]
}
@test "Ensure /bin/mount hasn't SUID/GUID set" {
run gotSGid /bin/mount
[ "$status" -eq 1 ]
}
@test "Ensure /bin/ping hasn't SUID/GUID set" {
run gotSGid /bin/ping
[ "$status" -eq 1 ]
}
@test "Ensure /bin/ping6 hasn't SUID/GUID set" {
run gotSGid /bin/ping6
[ "$status" -eq 1 ]
}
@test "Ensure /bin/su hasn't SUID/GUID set" {
run gotSGid /bin/su
[ "$status" -eq 1 ]
}
@test "Ensure /bin/umount hasn't SUID/GUID set" {
run gotSGid /bin/umount
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/bsd-write hasn't SUID/GUID set" {
run gotSGid /usr/bin/bsd-write
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/chage hasn't SUID/GUID set" {
run gotSGid /usr/bin/chage
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/chfn hasn't SUID/GUID set" {
run gotSGid /usr/bin/chfn
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/chsh hasn't SUID/GUID set" {
run gotSGid /usr/bin/chsh
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/mlocate hasn't SUID/GUID set" {
run gotSGid /usr/bin/mlocate
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/mtr hasn't SUID/GUID set" {
run gotSGid /usr/bin/mtr
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/newgrp hasn't SUID/GUID set" {
run gotSGid /usr/bin/newgrp
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/pkexec hasn't SUID/GUID set" {
run gotSGid /usr/bin/pkexec
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/traceroute6.iputils hasn't SUID/GUID set" {
run gotSGid /usr/bin/traceroute6.iputils
[ "$status" -eq 1 ]
}
@test "Ensure /usr/bin/wall hasn't SUID/GUID set" {
run gotSGid /usr/bin/wall
[ "$status" -eq 1 ]
}
@test "Ensure /usr/sbin/pppd hasn't SUID/GUID set" {
run gotSGid /usr/sbin/pppd
[ "$status" -eq 1 ]
}
|
import './sort-by'
// Specs for the Array.prototype.sortBy extension (registered by importing
// './sort-by' above). Each capability is exercised in both directions:
// no argument / field name / extractor function, plus the -1 reverse flag.
describe('Array.prototype.sortBy', () => {
  it('will sort numbers', () => {
    expect([1, 2, 3].sortBy()).toEqual([1, 2, 3])
  })
  it('will sort numbers in reverse', () => {
    expect([1, 2, 3].sortBy(-1)).toEqual([3, 2, 1])
  })
  it('will sort by field', () => {
    expect([{ x: 1 }, { x: 2 }, { x: 3 }].sortBy('x')).toEqual([ { x: 1 }, { x: 2 }, { x: 3 }])
  })
  it('will sort by field in reverse', () => {
    expect([{ x: 1 }, { x: 2 }, { x: 3 }].sortBy('x', -1)).toEqual([ { x: 3 }, { x: 2 }, { x: 1 }])
  })
  it('will sort objects by extract function', () => {
    expect([{ x: 1 }, { x: 2 }, { x: 3 }].sortBy(item => item.x)).toEqual([ { x: 1 }, { x: 2 }, { x: 3 }])
  })
  it('will sort objects by extract function in reverse', () => {
    expect([{ x: 1 }, { x: 2 }, { x: 3 }].sortBy(item => item.x, -1)).toEqual([ { x: 3 }, { x: 2 }, { x: 1 }])
  })
  it('will sort strings', () => {
    const items = [ { name: 'c' }, { name: 'a' }, { name: 'b' } ]
    const sorted = [ { name: 'a' }, { name: 'b' }, { name: 'c' } ]
    expect(items.sortBy('name')).toEqual(sorted)
  })
  it('will sort strings in reverse', () => {
    const items = [ { name: 'c' }, { name: 'a' }, { name: 'b' } ]
    const sorted = [ { name: 'c' }, { name: 'b' }, { name: 'a' } ]
    expect(items.sortBy('name', -1)).toEqual(sorted)
  })
  it('will sort strings by extract function', () => {
    const items = [ { name: 'c' }, { name: 'a' }, { name: 'b' } ]
    const sorted = [ { name: 'a' }, { name: 'b' }, { name: 'c' } ]
    expect(items.sortBy(item => item.name)).toEqual(sorted)
  })
  it('will sort strings by extract function in reverse', () => {
    const items = [ { name: 'c' }, { name: 'a' }, { name: 'b' } ]
    const sorted = [ { name: 'c' }, { name: 'b' }, { name: 'a' } ]
    expect(items.sortBy(item => item.name, -1)).toEqual(sorted)
  })
})
|
#!/bin/bash
# Deploy the seam backend and dashboard: run setup, install the systemd
# units, then copy the built artifacts into /srv/seam.
set -e  # abort on the first failing command instead of deploying partially
./setup.sh
# Copy systemd services
cp seam-backend/seam.service /etc/systemd/system/seam.service
cp seam-frontend/seam-dashboard.service /etc/systemd/system/seam-dashboard.service
# mkdir -p is a no-op when the directory already exists, so the former
# explicit existence checks were redundant.
mkdir -p /srv/seam/server
cp -r seam-backend/out/. /srv/seam/server/
mkdir -p /srv/seam/dashboard
cp -r seam-frontend/build/. /srv/seam/dashboard/
|
<filename>Practice problems/Goldman_Sachs_Problem2.cpp
#include<bits/stdc++.h>
using namespace std;
// bool check(string str, int k, int size) {
// for(int i=0; i<size-1; i++) {
// int temp1, temp2;
// temp1 = (int) str[i];
// temp2 = (int) str[i+1];
// if(abs(temp1-temp2)>k) return false;
// }
// return true;
// }
// string compute(int k, string str) {
// int size = str.length();
// for(int window=size; window>0; window--) {
// int num = size-window+1;
// for(int i=0; i<num; i++) {
// bool isSubstring = check(str.substr(i, window), k, window);
// if(isSubstring) return str.substr(i, window);
// }
// }
// return str;
// }
// Returns the longest substring of `word` in which every pair of adjacent
// characters differs by at most `k` (ASCII distance). On ties the earliest
// such substring wins. Returns "" for an empty input.
//
// Fixes two defects in the original:
//  - it read word[i+1] at i == size-1, i.e. word[size] (the implicit '\0'),
//    and relied on abs(last - '\0') > k to flush the final run;
//  - consequently, when k >= (int)word.back(), a qualifying run that reached
//    the end of the string was never recorded and "" was returned.
std::string compute(int k, std::string word) {
    int size = (int) word.size();
    if (size == 0) return "";
    int bestStart = 0, bestLen = 1; // a single character always qualifies
    int runStart = 0;               // start of the current qualifying run
    for (int i = 1; i < size; i++) {
        if (std::abs((int) word[i] - (int) word[i - 1]) > k) {
            runStart = i; // adjacent gap too large: start a new run here
        }
        if (i - runStart + 1 > bestLen) {
            bestStart = runStart;
            bestLen = i - runStart + 1;
        }
    }
    return word.substr(bestStart, bestLen);
}
// Reads k and a word from stdin, then prints the longest substring whose
// adjacent characters differ by at most k.
int main() {
    int k;
    string str;
    cin >> k;
    cin >> str;
    string ans = compute(k, str);
    cout << ans << endl;
    return 0;
}
|
<reponame>ramesh-kr/sentinel<filename>sentinel-tendermint/wire.go
package sentinel
import (
"github.com/cosmos/cosmos-sdk/wire"
)
// Register concrete types on wire codec
func RegisterWire(cdc *wire.Codec) {
	cdc.RegisterConcrete(MsgRegisterVpnService{}, "sentinel/registervpn", nil)
	cdc.RegisterConcrete(MsgQueryRegisteredVpnService{}, "sentinel/queryvpnservice", nil)
	// NOTE(review): the two route strings below misspell "sentinel" as
	// "sentienl". They are part of the encoded wire format, so "fixing" them
	// would break compatibility with already-serialized messages; they are
	// deliberately left unchanged.
	cdc.RegisterConcrete(MsgDeleteVpnUser{}, "sentienl/deletevpnservice", nil)
	cdc.RegisterConcrete(MsgRegisterMasterNode{}, "sentinel/masternoderegistration", nil)
	cdc.RegisterConcrete(MsgQueryFromMasterNode{}, "sentienl/querythevpnservice", nil)
	cdc.RegisterConcrete(MsgDeleteMasterNode{}, "sentinel/deletemasternode", nil)
	cdc.RegisterConcrete(MsgPayVpnService{}, "sentinel/payvpnservice", nil)
	cdc.RegisterConcrete(MsgRefund{}, "sentinel/clientrefund", nil)
	cdc.RegisterConcrete(MsgGetVpnPayment{}, "sentinel/getvpnpayment", nil)
}
// msgCdc is the package-level codec used for message (de)serialization.
var msgCdc = wire.NewCodec()
// init registers all concrete message types plus the crypto types on msgCdc.
func init() {
	RegisterWire(msgCdc)
	wire.RegisterCrypto(msgCdc)
}
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2535-1
#
# Security announcement date: 2015-03-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:22 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - php5-cli:5.3.10-1ubuntu3.17
# - libapache2-mod-php5:5.3.10-1ubuntu3.17
# - php5-cgi:5.3.10-1ubuntu3.17
# - php5-fpm:5.3.10-1ubuntu3.17
# - php5-enchant:5.3.10-1ubuntu3.17
# - php5-cgi:5.3.10-1ubuntu3.17
# - libapache2-mod-php5:5.3.10-1ubuntu3.17
# - php5-fpm:5.3.10-1ubuntu3.17
# - php5-enchant:5.3.10-1ubuntu3.17
#
# Last versions recommended by security team:
# - php5-cli:5.3.10-1ubuntu3.25
# - libapache2-mod-php5:5.3.10-1ubuntu3.25
# - php5-cgi:5.3.10-1ubuntu3.25
# - php5-fpm:5.3.10-1ubuntu3.17
# - php5-enchant:5.3.10-1ubuntu3.17
# - php5-cgi:5.3.10-1ubuntu3.25
# - libapache2-mod-php5:5.3.10-1ubuntu3.25
# - php5-fpm:5.3.10-1ubuntu3.17
# - php5-enchant:5.3.10-1ubuntu3.17
#
# CVE List:
# - CVE-2014-8117
# - CVE-2014-9705
# - CVE-2015-0273
# - CVE-2015-2301
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade each affected package to the pinned version. Note: php5-cgi and
# libapache2-mod-php5 are listed (and upgraded) twice, and php5-fpm /
# php5-enchant are pinned to the older 3.17 builds, matching the header above.
sudo apt-get install --only-upgrade php5-cli=5.3.10-1ubuntu3.25 -y
sudo apt-get install --only-upgrade libapache2-mod-php5=5.3.10-1ubuntu3.25 -y
sudo apt-get install --only-upgrade php5-cgi=5.3.10-1ubuntu3.25 -y
sudo apt-get install --only-upgrade php5-fpm=5.3.10-1ubuntu3.17 -y
sudo apt-get install --only-upgrade php5-enchant=5.3.10-1ubuntu3.17 -y
sudo apt-get install --only-upgrade php5-cgi=5.3.10-1ubuntu3.25 -y
sudo apt-get install --only-upgrade libapache2-mod-php5=5.3.10-1ubuntu3.25 -y
sudo apt-get install --only-upgrade php5-fpm=5.3.10-1ubuntu3.17 -y
sudo apt-get install --only-upgrade php5-enchant=5.3.10-1ubuntu3.17 -y
|
<filename>src/certificate/__mocks__/CertificateProvider.ts
import { RawCertificate } from "../../shared/types/certificate/RawCertificate";
import { InvalidResponseError } from "../../shared/types/errors/InvalidResponseError";
import { ServerError } from "../../shared/types/errors/ServerError";
export class CertificateProvider {
  /**
   * Jest mock of CertificateProvider.fetchCertificateByUrl.
   *
   * Settles the returned promise according to the requested url so tests can
   * exercise each outcome:
   *  - "invalid.status.example.com" -> rejects with InvalidResponseError(301)
   *  - "unexpected.example.com"     -> rejects with a plain Error
   *  - "example.com"                -> resolves with a dummy RawCertificate
   *  - anything else                -> rejects with ServerError
   *
   * Fix: the original switch had no break statements, so every case fell
   * through to the ones below. It only worked because a promise can settle
   * once; the explicit breaks make the intent real instead of accidental.
   */
  fetchCertificateByUrl = jest.fn(
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    (url: string, userAgent?: string): Promise<RawCertificate> => {
      return new Promise((resolve, reject) => {
        switch (url) {
          case "invalid.status.example.com":
            reject(new InvalidResponseError(301));
            break;
          case "unexpected.example.com":
            reject(new Error());
            break;
          case "example.com":
            resolve(new RawCertificate("dadssadsa"));
            break;
          default:
            reject(new ServerError());
        }
      });
    }
  );
}
|
#!/bin/bash
#i:A example command.
#u:Usage: myexample hello
echo "Hello,world!"
set_log_level 4
debug "MYEXAMPLE_HOME=$MYEXAMPLE_HOME"
debug "MYEXAMPLE_CMD=$MYEXAMPLE_CMD"
log "\033[32mTest text\033[0m"
error "Test text"
warn "Test text"
debug "Test text"
|
package com.example.xty.helloagain.MyDataBase;
import org.greenrobot.greendao.annotation.Entity;
import org.greenrobot.greendao.annotation.Generated;
import org.greenrobot.greendao.annotation.Id;
import org.greenrobot.greendao.annotation.Property;
/**
* Created by Kai on 2016/10/13. Utilized by Ve in 2018
* 课程表实体类
* 在数据库中生成对应的表
*/
@Entity
public class MyCurriculumTable {
    @Id
    private Long id; // primary key
    @Property(nameInDb = "c_id")
    private int c_id;
    @Property(nameInDb = "studentNumber")
    private String studentNumber;
    @Property(nameInDb = "classNumber")
    private String classNumber;
    @Property(nameInDb = "className")
    private String className;
    @Property(nameInDb = "cName")
    private String cName;
    @Property(nameInDb = "cType")
    private String cType;
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
    public int getC_id() {
        return c_id;
    }
    public void setC_id(int c_id) {
        this.c_id = c_id;
    }
    public String getStudentNumber() {
        return studentNumber;
    }
    public void setStudentNumber(String studentNumber) {
        this.studentNumber = studentNumber;
    }
    public String getClassNumber() {
        return classNumber;
    }
    public void setClassNumber(String classNumber) {
        this.classNumber = classNumber;
    }
    public String getClassName() {
        return className;
    }
    public void setClassName(String className) {
        this.className = className;
    }
    public String getcName() {
        return cName;
    }
    public void setcName(String cName) {
        this.cName = cName;
    }
    public String getcType() {
        return cType;
    }
    public void setcType(String cType) {
        this.cType = cType;
    }
    // The accessors below reference fields (tName, period, cTime, weekday,
    // location) that are declared further down in the class -- legal in Java,
    // just unusual ordering.
    public String gettName() {
        return tName;
    }
    public void settName(String tName) {
        this.tName = tName;
    }
    public String getPeriod() {
        return period;
    }
    public void setPeriod(String period) {
        this.period = period;
    }
    public String getcTime() {
        return cTime;
    }
    public void setcTime(String cTime) {
        this.cTime = cTime;
    }
    public String getWeekday() {
        return weekday;
    }
    public void setWeekday(String weekday) {
        this.weekday = weekday;
    }
    public String getLocation() {
        return location;
    }
    public void setLocation(String location) {
        this.location = location;
    }
    // NOTE(review): the capitalized variants below duplicate the lower-case
    // accessors above -- presumably one set is greenDAO-generated and the
    // other follows bean conventions; confirm both are actually needed.
    public String getCTime() {
        return this.cTime;
    }
    public void setCTime(String cTime) {
        this.cTime = cTime;
    }
    public String getTName() {
        return this.tName;
    }
    public void setTName(String tName) {
        this.tName = tName;
    }
    public String getCType() {
        return this.cType;
    }
    public void setCType(String cType) {
        this.cType = cType;
    }
    public String getCName() {
        return this.cName;
    }
    public void setCName(String cName) {
        this.cName = cName;
    }
    @Property(nameInDb = "tName")
    private String tName;
    @Property(nameInDb = "period")
    private String period;
    @Property(nameInDb = "cTime")
    private String cTime;
    @Property(nameInDb = "weekday")
    private String weekday;
    @Property(nameInDb = "location")
    private String location;
    // greenDAO-generated all-args constructor; do not edit the hash.
    @Generated(hash = 1225329189)
    public MyCurriculumTable(Long id, int c_id, String studentNumber, String classNumber,
            String className, String cName, String cType, String tName, String period,
            String cTime, String weekday, String location) {
        this.id = id;
        this.c_id = c_id;
        this.studentNumber = studentNumber;
        this.classNumber = classNumber;
        this.className = className;
        this.cName = cName;
        this.cType = cType;
        this.tName = tName;
        this.period = period;
        this.cTime = cTime;
        this.weekday = weekday;
        this.location = location;
    }
    // greenDAO-generated no-args constructor.
    @Generated(hash = 390820896)
    public MyCurriculumTable() {
    }
}
|
import { readFileSync, writeFileSync } from 'fs'
import Parser from './core'
// CLI: parse a .puz crossword file and either pretty-print it as JSON
// (no output path given) or write compact JSON to the given path.
const [puzPath, outpath] = process.argv.slice(2, 4)
// latin1 keeps a 1:1 byte-to-char mapping for the binary-ish .puz format.
const content = readFileSync(puzPath, { encoding: 'latin1' })
const parsed = new Parser({ verbose: true }).parse(content)
if (!outpath) {
  // eslint-disable-next-line no-console
  console.log(JSON.stringify(parsed.toJSON(), null, 2))
} else {
  writeFileSync(outpath, JSON.stringify(parsed))
}
|
<filename>structural/Flyweight.js<gh_stars>1-10
// A single car model; instances are shared (cached) by AutoFactory.
class Auto {
  constructor(model) {
    this.model = model;
  }
}
// Flyweight - легковес (Необходим для кеширования одинаковых данных)
class AutoFactory {
constructor() {
this.models = {};
}
create(name) {
let model = this.models[name];
if (model) return model;
console.count('model');
this.models[name] = new Auto(name);
return this.models[name];
}
getModels() {
console.table(this.models);
}
}
/* INIT */
// Demo: the three 'BMW' requests share one cached instance, so
// console.count fires only twice (once for BMW, once for Tesla).
const factory = new AutoFactory();
const bmw = factory.create('BMW');
const bmw2 = factory.create('BMW');
const bmw3 = factory.create('BMW');
const tesla = factory.create('Tesla');
factory.getModels();
|
from Crypto.Cipher import AES
key = b'Sixteen byte key'
def encrypt(plaintext):
    # WARNING: ECB mode is insecure for almost all real uses -- identical
    # plaintext blocks yield identical ciphertext blocks, leaking structure,
    # and there is no integrity protection. Prefer AES-GCM (or CBC + MAC).
    # Note: plaintext length must be a multiple of 16 bytes (no padding here).
    cipher = AES.new(key, AES.MODE_ECB)
    return cipher.encrypt(plaintext)
def decrypt(ciphertext):
    # Inverse of encrypt(); same ECB caveats apply (see encrypt). A fresh
    # cipher object is created because ECB state is not reusable across calls.
    cipher = AES.new(key, AES.MODE_ECB)
    return cipher.decrypt(ciphertext)
|
/**
* The MIT License
*
* Copyright (C) 2015 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package io.github.astrapi69.comparator;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Comparator;
import org.testng.annotations.Test;
/**
* The unit test class for the class {@link SortOrderComparator}.
*/
public class SortOrderComparatorTest
{

	/**
	 * Test method for {@link SortOrderComparator#compare(Comparable, Comparable)}.
	 * Verifies ascending (default), descending, and factory-created
	 * comparators, including null handling: in ascending order any value
	 * compares greater than null, in descending order less than null.
	 */
	@Test
	public void testComparable()
	{
		// default constructor: ascending order
		Comparator<Integer> comparator = new SortOrderComparator<Integer>();
		final Integer i1 = 42;
		final Integer lesser = i1 / 2;
		final Integer same = i1;
		final Integer greater = i1 * 2;
		assertTrue(comparator.compare(i1, lesser) > 0);
		assertTrue(comparator.compare(i1, same) == 0);
		assertTrue(comparator.compare(i1, greater) < 0);
		assertTrue(comparator.compare(i1, null) > 0);
		// explicit descending order inverts every comparison
		comparator = new SortOrderComparator<Integer>(SortOrder.DESCENDING);
		assertTrue(comparator.compare(i1, lesser) < 0);
		assertTrue(comparator.compare(i1, same) == 0);
		assertTrue(comparator.compare(i1, greater) > 0);
		assertTrue(comparator.compare(i1, null) < 0);
		// explicit ascending order matches the default constructor
		comparator = new SortOrderComparator<Integer>(SortOrder.ASCENDING);
		assertTrue(comparator.compare(i1, lesser) > 0);
		assertTrue(comparator.compare(i1, same) == 0);
		assertTrue(comparator.compare(i1, greater) < 0);
		assertTrue(comparator.compare(i1, null) > 0);
		// factory method without argument: ascending
		comparator = SortOrderComparator.of();
		assertTrue(comparator.compare(i1, lesser) > 0);
		assertTrue(comparator.compare(i1, same) == 0);
		assertTrue(comparator.compare(i1, greater) < 0);
		assertTrue(comparator.compare(i1, null) > 0);
		// factory method with explicit descending order
		comparator = SortOrderComparator.of(SortOrder.DESCENDING);
		assertTrue(comparator.compare(i1, lesser) < 0);
		assertTrue(comparator.compare(i1, same) == 0);
		assertTrue(comparator.compare(i1, greater) > 0);
		assertTrue(comparator.compare(i1, null) < 0);
	}
}
|
def decimal_to_binary(num):
    """Return the binary digits of a non-negative integer, most significant first.

    Fixes the zero edge case: the original returned [] for 0 because the
    while loop never ran; [0] is returned instead so every input yields at
    least one digit. Results for positive inputs are unchanged.
    """
    if num == 0:
        return [0]
    binary = []
    while num > 0:
        num, rem = divmod(num, 2)  # one division instead of separate % and //
        binary.append(rem)
    return binary[::-1]
|
# encoding: UTF-8
# Gem specification for the spree_taxjar Spree extension (TaxJar sales tax).
Gem::Specification.new do |s|
  s.platform    = Gem::Platform::RUBY
  s.name        = 'spree_taxjar'
  s.version     = '4.2.0'
  s.summary     = 'Spree extension to calculate sales tax in states of USA'
  s.description = 'Spree extension for providing TaxJar services in USA'
  s.required_ruby_version = '>= 2.1.0'
  s.author    = ['<NAME>', '<NAME>']
  s.email     = ['<EMAIL>', '<EMAIL>']
  s.license   = 'BSD-3'
  s.require_path = 'lib'
  s.requirements << 'none'
  spree_version = '>= 3.2.0'
  s.add_dependency 'spree_backend', spree_version
  s.add_dependency 'taxjar-ruby', '>= 2.6', '< 3.1'
  s.add_dependency 'deface', '~> 1.0'
  s.add_development_dependency 'shoulda-matchers'
  s.add_development_dependency 'spree_dev_tools'
  s.add_development_dependency 'vcr'
  s.add_development_dependency 'webmock'
end
|
package com.damavis.spark.database.exceptions
// Thrown when a database name fails validation. Note the quadruple quote:
// s""""$name" ...""" wraps the offending name in literal double quotes,
// producing e.g. `"bad db" is not a valid database name`.
class InvalidDatabaseNameException(name: String)
    extends Exception(s""""$name" is not a valid database name""") {}
|
#include <iostream>
#include <memory>
#include <spdlog/spdlog.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <spdlog/sinks/basic_file_sink.h>
// Severity levels understood by Logger::log, mapped to spdlog's
// info/warn/error calls respectively.
enum class LogLevel {
    Info,
    Warning,
    Error
};
// Thin wrapper around an spdlog logger with a fixed output pattern.
class Logger {
public:
    // Creates a colored stdout logger registered globally under `name` and
    // applies `pattern` to it.
    // NOTE(review): spdlog registers the name globally -- constructing two
    // Loggers with the same name will presumably throw; confirm.
    Logger(const std::string& name, const std::string& pattern) {
        logger_ = spdlog::stdout_color_mt(name);
        logger_->set_pattern(pattern);
    }
    // NOTE(review): the constructor already attached a console sink via
    // stdout_color_mt, so calling this adds a SECOND console sink and
    // duplicates console output. Also, sinks appended here after set_pattern
    // may not pick up the constructor's pattern -- TODO confirm intent.
    void addConsoleSink() {
        logger_->sinks().emplace_back(std::make_shared<spdlog::sinks::stdout_color_sink_mt>());
    }
    // Appends a file sink writing (truncating) to `filename`.
    void addFileSink(const std::string& filename) {
        logger_->sinks().emplace_back(std::make_shared<spdlog::sinks::basic_file_sink_mt>(filename, true));
    }
    // Dispatches `message` to the underlying logger at the given severity.
    void log(LogLevel level, const std::string& message) {
        switch (level) {
        case LogLevel::Info:
            logger_->info(message);
            break;
        case LogLevel::Warning:
            logger_->warn(message);
            break;
        case LogLevel::Error:
            logger_->error(message);
            break;
        }
    }
private:
    std::shared_ptr<spdlog::logger> logger_;
};
// Demo: build a console+file logger and emit one message per severity.
int main() {
    Logger engineLogger("EngineLogger", "[%H:%M:%S] [%n] [%^%l%$] %v");
    engineLogger.addConsoleSink();
    engineLogger.addFileSink("engine.log");
    engineLogger.log(LogLevel::Info, "Engine started.");
    engineLogger.log(LogLevel::Warning, "Low memory warning.");
    engineLogger.log(LogLevel::Error, "Critical error: engine shutdown.");
    return 0;
}
|
package nio.socket;
import java.io.PrintWriter;
import java.net.Socket;
import java.util.Scanner;
// Blocking-IO TCP client: a writer thread forwards stdin lines to the
// server while the main thread polls the socket and prints replies.
public class TcpBioClient {
    @SuppressWarnings("resource")
    public static void main(String[] args) throws Exception {
        Socket socket = new Socket("localhost", 1000);
        new Thread() {
            PrintWriter writer = new PrintWriter(socket.getOutputStream(), true);
            @Override
            public void run() {
                writer.write("hello server, i am bio client!");
                writer.flush();
                Scanner scan = new Scanner(System.in); // read lines typed on the keyboard
                while (scan.hasNextLine()) {
                    writer.write(scan.nextLine());
                    writer.flush();
                }
            }
        }.start();
        int readCound;
        // NOTE(review): busy-wait polling -- this loop spins at full CPU while
        // no data is available; a blocking read (commented out below) would
        // avoid that at the cost of blocking the thread.
        while (true) {
            if ((readCound = socket.getInputStream().available()) > 0) {
                byte[] buff = new byte[readCound];
                socket.getInputStream().read(buff);
                System.out.println(new String(buff));
            }
            // would block:
            /*BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream()));
            String body = reader.readLine();
            if (body != null) System.out.println(body);*/
        }
    }
}
|
package conf
import (
"fmt"
"io"
"net/url"
"path"
"strings"
"text/template"
"time"
"github.com/xavier268/go-ticket/common"
)
// Preload templates from Conf.Templates array, using the provided paths.
// The base name will be from the LAST file.
// Templates names and file names include the extension (.html or else ...)
func (c *Conf) preloadTemplates() {
	if len(c.Templates.Patterns) == 0 || len(c.Templates.Paths) == 0 {
		// NOTE(review): message typo "configurationg" kept as-is; also the
		// text says "ignoring" but this panics rather than ignores.
		panic("Missing template files or paths in configurationg, ignoring templates ...")
	}
	var err error
	c.Templates.UsedPath = ""
	// Define function map exposed to all templates.
	t := template.New("").Funcs(
		template.FuncMap{
			"isAdmin": func(role common.Role) bool { return role >= common.RoleAdmin },
			"isNone":  func(role common.Role) bool { return role == common.RoleNone },
			"now":     func() string { return time.Now().Format(c.TimeFormat) },
			// tkturl builds the public ticket URL for a ticket id.
			// path.Join collapses "//" after the scheme, so the scheme
			// separators are patched back afterwards.
			"tkturl": func(tid string) string {
				u := path.Join(c.Addr.Public, c.API.Ticket)
				u = strings.Replace(u, "http:/", "http://", -1)
				u = strings.Replace(u, "https:/", "https://", -1)
				u = u + "?" + c.API.QueryParam.Ticket + "=" + tid
				return u
			},
			// qrurl builds the QR-image URL encoding targetUrl (unescaped input).
			"qrurl": func(targetUrl string) string { // targetUrl is the unescaped url
				u := path.Join(c.Addr.Public, c.API.QRImage)
				u = strings.Replace(u, "http:/", "http://", -1)
				u = strings.Replace(u, "https:/", "https://", -1)
				u = u + "?" + c.API.QueryParam.QRText + "=" + url.QueryEscape(targetUrl)
				return u
			},
		})
	// identify path and load the first template: the first path where the
	// first pattern parses successfully becomes UsedPath for all patterns.
	for _, p := range c.Templates.Paths {
		f := path.Join(p, c.Templates.Patterns[0])
		c.Templates.t, err = t.ParseGlob(f)
		if err == nil {
			c.Templates.UsedPath = p
			if c.Test.Verbose {
				fmt.Println("Found Template path : ", p)
			}
			break
		} else {
			if c.Test.Verbose {
				fmt.Println("Template path : ", p, " for ", f, err)
			}
		}
	}
	if err != nil {
		fmt.Println(c.String())
		panic("Could not load first template ! ")
	}
	// load the next templates from the chosen path; parse errors are only
	// logged (in verbose mode), not fatal.
	for _, tp := range c.Templates.Patterns[1:] {
		f := path.Join(c.Templates.UsedPath, tp)
		c.Templates.t, err = c.Templates.t.ParseGlob(f)
		if err != nil && c.Test.Verbose {
			fmt.Println("Error parsing template : ", err)
		}
	}
	if c.Test.Verbose {
		fmt.Println(c.Templates.t.DefinedTemplates())
	}
}
// ExecuteTemplate write the computed template to w.
// Execution errors are printed to stdout and otherwise swallowed, so a
// failed render may leave w partially written.
func (c *Conf) ExecuteTemplate(w io.Writer, templateName string, data interface{}) {
	err := c.Templates.t.ExecuteTemplate(w, templateName, data)
	if err != nil {
		fmt.Println("Error executing ", templateName, err)
	}
}
|
import { NgModule } from '@angular/core';
import { ChartCommonModule } from '../common/chart-common.module';
import { BubbleChartComponent } from './bubble-chart.component';
import { BubbleSeriesComponent } from './bubble-series.component';
import { LineChartModule } from '../line-chart/line-chart.module';
export { BubbleChartComponent, BubbleSeriesComponent };
// Angular module bundling the bubble-chart components (re-exported above
// so consumers can import them directly from this module).
@NgModule({
  imports: [
    ChartCommonModule,
    LineChartModule
  ],
  declarations: [
    BubbleChartComponent,
    BubbleSeriesComponent
  ],
  exports: [
    BubbleChartComponent,
    BubbleSeriesComponent
  ]
})
export class BubbleChartModule {}
|
<filename>models/messages.js
import { Schema, model, SchemaTypes } from 'mongoose';
// Mongoose schema for one chat message exchanged between two users.
var messageSchema = new Schema({
  // author of the message
  idUser: {
    type: SchemaTypes.ObjectId,
    ref: 'Users',
    required: true,
  },
  // the other participant in the conversation
  idUserChatting: {
    type: SchemaTypes.ObjectId,
    ref: 'Users',
    required: true,
  },
  // numeric message-kind code -- NOTE(review): the code values are not
  // visible here; document the enum where messages are created.
  type: {
    type: Number,
    required: true,
  },
  content: {
    type: String,
    required: true,
  }
}, {
  timestamps: true, // adds createdAt / updatedAt automatically
});
var Messages = model('Messages', messageSchema);
// NOTE(review): ESM import at the top but CommonJS export here -- presumably
// a transpiler bridges the two; confirm how consumers load this module.
module.exports = Messages;
|
<reponame>cmur2/chef-klaus<filename>metadata.rb
name "klaus"
maintainer "<NAME>"
maintainer_email "<EMAIL>"
license "Apache 2.0"
description "Installs klaus (the git web viewer) via pip and configures it"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "1.0.0"
depends "python"
#suggests ""
supports "debian"
|
<gh_stars>0
# Copyright (c) [2022] Huawei Technologies Co.,Ltd.ALL rights reserved.
# This program is licensed under Mulan PSL v2.
# You can use it according to the terms and conditions of the Mulan PSL v2.
# http://license.coscl.org.cn/MulanPSL2
# THIS PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
####################################
# @Author :
# @email :
# @Date :
# @License : Mulan PSL v2
#####################################
from flask_restful import Api
from .routes import Login, Org, OrgItem
def init_api(api: Api):
    """Register the admin REST routes on the given flask-restful Api."""
    api.add_resource(Login, '/api/v1/admin/login', endpoint='admin_login')
    api.add_resource(Org, '/api/v1/admin/org', endpoint='admin_org')
    # NOTE(review): no explicit endpoint here, so flask-restful derives one
    # from the resource class name -- confirm nothing relies on url_for with
    # a specific endpoint name for this route.
    api.add_resource(OrgItem, '/api/v1/admin/org/<int:org_id>')
|
package pages;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
// Page object for the "forgot password" screen; inherits shared behaviour
// from Login. Elements are located via the @FindBy xpath annotations.
public class ForgotPass extends Login {
    // email input on the reset form
    @FindBy(xpath = "//input[@formcontrolname='email']")
    private WebElement fieldEmail;
    // submits the password-reset request
    @FindBy(xpath = "//span[contains(text(),'Request Password Reset')]")
    private WebElement buttonRequestPassReset;
    // navigates back to the login screen
    @FindBy(xpath = "//span[contains(text(),'Back to Login')]")
    private WebElement buttonBackLogin;
}
|
def add_two_nums(num1, num2):
    """Return the sum of the two given numbers."""
    total = num1 + num2
    return total


result = add_two_nums(2, 5)
print(result)
|
#!/bin/bash
set -e
##################################################################################################################
# Written to be used on 64 bits computers
# Author 	: 	Erik Dubois
# Website 	: 	http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
#   DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
echo "Making sure firefox looks great in dark themes"
echo "You should run this script after you rebooted and are in i3."
echo "Firefox must have started once. The directory will not exist otherwise."
# The *.default glob matches the default Firefox profile directory; it only
# exists after Firefox has run at least once (hence the warning above).
cp -r settings/firefox/chrome/ ~/.mozilla/firefox/*.default
echo "Reboot firefox to see the effect"
echo "################################################################"
echo "#########   firefox settings installed          ################"
echo "################################################################"
|
#!/usr/bin/env bash
export branch_name="${BRANCH_NAME:-local}"
export image_name="ajoelpod/mock-json-server"
docker push $image_name:$branch_name
if [ "${branch_name}" = "master" ]; then
docker tag $image_name:$branch_name $image_name:latest
docker push $image_name:latest
fi
docker rmi $image_name:$branch_name --force
|
var provider = new firebase.auth.GoogleAuthProvider();
provider.addScope('https://www.googleapis.com/auth/userinfo.profile');
firebase.auth().useDeviceLanguage();
// TODO(review): stub -- email/password sign-in is not implemented yet.
async function emailLogin() {
}
// Signs the user in with the Google popup flow, caches the Firebase user in
// localStorage, then redirects to the parent page. Errors are caught but
// currently ignored (only unpacked into locals).
async function googleLogin() {
    firebase.auth().signInWithPopup(provider).then(function (result) {
        var token = result.credential.accessToken;
        var user = result.user;
        console.log(result)
        // NOTE(review): localStorage holds the raw user object -- anything
        // reading "user" later must tolerate a stale or absent entry.
        localStorage.setItem("user", JSON.stringify(result.user))
        //         var settings = {
        //             "async": true,
        //             "crossDomain": true,
        //             "url": "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key=" + result.user.l,
        //             "method": "POST",
        //             "headers": {},
        //             "data": {
        //                 "idToken": <PASSWORD>
        //             }
        //         }
        //         $.ajax(settings).done(function (response) {
        //             console.log(response);
        location.href = "../"
        //         });
    }).catch(function (error) {
        // Handle Errors here.
        var errorCode = error.code;
        var errorMessage = error.message;
        // The email of the user's account used.
        var email = error.email;
        // The firebase.auth.AuthCredential type that was used.
        var credential = error.credential;
        // ...
    });
}
// firebase.auth().getRedirectResult().then(function (result) {
// if (result.credential) {
// // This gives you a Google Access Token. You can use it to access the Google API.
// var token = result.credential.accessToken;
// // ...
// }
// // The signed-in user info.
// var user = result.user;
// console.log(result)
// }).catch(function (error) {
// alert(error)
// });
|
// source repo: jimallman/temporal-modelling
/*
Copyright 2010 by the Rector and Visitors of the University of Virginia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/***********************************************************
*
* $Author: jima $
*
* $Date: 2004/02/11 02:29:24 $
*
* $Revision: 1.17 $
*
* $Log: ModelWranglers.js,v $
* Revision 1.17 2004/02/11 02:29:24 jima
* Added Cancelled inflection
*
* Revision 1.16 2004/02/10 19:15:21 jima
* Added Causality sprite, moving on to Inspector...
*
* Revision 1.15 2004/02/05 21:44:18 jima
* We have working Special inflections, now I'll work on Causality.
*
* Revision 1.14 2004/02/04 20:04:11 jima
* Currently in rough shape. I'm going to tighten up layer vs. parent distinction, finish freeze/thaw logic
*
* Revision 1.13 2004/01/31 08:32:01 jima
* ...more progress, saving OK i think, but won't load properly...
*
* Revision 1.12 2004/01/31 07:44:59 jima
* still debugging selective freeze..
*
* Revision 1.11 2004/01/31 06:10:53 jima
* Fixed mismatched {
*
* Revision 1.10 2004/01/31 06:04:42 jima
* Selective freeze and thaw functions are done(?), ready for testing..
*
* Revision 1.9 2004/01/31 05:11:22 jima
* Working out thawTemporalModel(), including layer consolidation option
*
* Revision 1.8 2004/01/30 20:41:54 jima
* Moving to more selective model-wrangling, based on selected objects rather
* than just layers. freezeTemporalModel() is ready to test, now I'll do
* restoreFromTemporalModel()
*
* Revision 1.7 2004/01/30 15:59:56 jima
* Save BEFORE attempting unified methods for load/save and clipboard
* functions
*
* Revision 1.6 2004/01/29 16:48:10 jima
* Added fairly strong UUID generator, to avoid ID collisions in loaded models
*
* Revision 1.5 2004/01/28 05:13:46 jima
* Inflections are now full-fledged objects, vs. relations (and stored
* accordingly)
*
* Revision 1.4 2004/01/27 16:56:47 jima
* Removed 'Source' keyword (not required)
*
* Revision 1.3 2004/01/27 15:58:03 jima
* Added keyword expansion, perhaps a standard file header
*
*
***********************************************************/
/*
Functions for loading and saving (marshalling, freezing
and thawing) data for "temporal models"
A temporal model is a strictly transitive object, not a
persistent temporal primitive that can be tinkered with.
In fact, it's more of a transport container, a bundle of
stored stuff: Play Space settings, layer specs, color
settings, temporal objects and temporal relations. (See
the sample XML for details).
For this reason, we should only wrangle temporal models
in the Play Space when it's time to wrap or unwrap data.
Once XML data's been un-wrapped, it's dumped into the free-
for-all environment of the Play Space. When the user decides
to "save a model", we create a temporal model by capturing
all the requested layers, their objects, and any selected
Play Space settings into an XML string for storage.
SO... there really isn't any need for a TemporalModel class,
just a set of methods for wrapping and un-wrapping this data
(from XML to live objects and back).
TODO: Determine whether a newly loading model should assert
its own labels on other objects in the PS, or simply strip
their current labels.
TODO: Apply some scheme to reconcile object IDs in multiple
models? Conflicting names, too? Or should we simply treat all
IDs as temporary/local?
Here's a thought experiment: What happens if we load the model "Battle of the
Bulge" into the Play Space, then load it again? I think there
should be two (duplicate) sets of data, but with different IDs
and layers
TODO: Outline class-names and define classes for some inflection types:
Special
.sprite => mc, .target => obj
targetID=
Causality
.sprite => mc, .source => obj, .targetObj => obj
sourceID=, targetID=
EncompassingMood:
.sprite => mc, .includedObjects => [obj, obj, ...]
includedIDs= {comma-delimited ID values}
***********************************************************/
// Generate a universally unique ID string of the form:
//   [className:]<bigRandom>:<IP-octets-in-hex>:<elapsed-ms>:<UTC-ms>
// combining spatial (IP), temporal (timer + date) and random elements.
// 'className' (optional string) is prepended purely for legibility in
// the model XML, so linked object kinds are recognizable at a glance.
// NOTE: relies on the globals 'currentIPAddress' (dotted-quad string set
// on the root Flash timeline) and getTimer() (ms since movie start).
function generateUUID( className ) {
    // NOTE that, while hex would make this ID shorter, we only use it
    // for the IP address, since other numbers are usually too long for
    // Actionscript to render as hex (go figure..)
    var strUUID = "";
    if (typeof(className) == 'string') {
        // Prepend with class-name, if provided. Note that this is largely
        // for legibility in model XML, so we can see what kinds of objects
        // are linked within a model.
        strUUID += (className +":");
    }
    // Add a large random number (in case of rapid-fire generation of UUIDs);
    // substr(2) strips the leading "0." from Math.random()'s decimal string.
    var bigRandomNumber = String(Math.random()).substr(2);
    strUUID += bigRandomNumber;
    // Add a token for location (IP address), in hex
    strUUID += ":";
    var strOctet, IParray = currentIPAddress.split('.');
    for (var i = 0; i < 4; i++) {
        strOctet = Number(IParray[i]).toString(16);
        if (strOctet.length == 1) {
            // pad single digit numbers
            strOctet = "0"+ strOctet;
        }
        strUUID += strOctet;
    }
    // Add a "safety" token for elapsed time (milliseconds since this Flash
    // movie started playing)
    var timeComponent1 = getTimer();
    strUUID += (":"+ timeComponent1);
    // Add a token for current timestamp (UTC millisec from Date).
    // (The original also did 'delete tempDate', which is a no-op on a
    // var binding — delete only removes object properties — so it was
    // dropped here.)
    var tempDate = new Date();
    var timeComponent2 = tempDate.getTime();
    strUUID += (":"+ timeComponent2);
    return strUUID;
}
// Restore selected layers from a temporal-model XMLDOM (create and populate
// the specified objects in the Play Space), then return a simple result code
// ("OK" or an error string).
//
// Parameters:
//   modelNode             - the <TemporalModel> XML node holding frozen data
//   arrIncludedLayerNodes - Array of <Layer> child nodes chosen for thawing
//   blnApplyPSlabels      - if true, assert the model's stored label names
//                           onto the Play Space label slots
//   blnApplyPSlayout      - if true, restore stored Play Space layout (TODO)
//   blnConsolidateLayers  - if true, merge restored objects into same-named
//                           existing Play Space layers (passed to groomer)
function thawTemporalModel( modelNode, arrIncludedLayerNodes, blnApplyPSlabels, blnApplyPSlayout, blnConsolidateLayers ) {
    // 'arrLayerNodes' is an array of <Layer> nodes (presumably from a single parent
    // <TemporalModel>) whose data we've chosen to thaw. NOTE: An empty array is OK, maybe
    // we just want the Play Space labels..
    if (arrIncludedLayerNodes.length == 0) {
        if ((!blnApplyPSlabels) && (!blnApplyPSlayout)) {
            // Weird, they're not actually asking for anything from this model.. bail out!
            _root.report( "No layers specified, no labels or layout.. What's the point?" );
            return "No layers specified, no labels or layout.. What's the point?";
        }
    } else if ((arrIncludedLayerNodes instanceof Array) == false) {
        _root.report( "thawTemporalModel(): ERROR - second arg is not an Array!" );
        return "Second arg is not an Array (of Layer nodes)!";
    }
    // If 'blnApplyPSlabels' is true, then this model should assert its
    // stored label settings in the Play Space (convert existing objects?!)
    // If 'blnApplyPSlayout' is true, then the thawed objects should assume
    // their previous scale and position in the Play Space. Otherwise, general
    // layout rules should determine their placement.
    // If 'blnConsolidateLayers' is true, then we should look for
    // same-named layers in the Play Space, and place our restored objects
    // in the matching layer, if available.
    // First, let's apply any Play Space settings that were chosen
    // NOTE: As a rule, we shouldn't expect this when pasting from the
    // clipboard, but only when loading a stored model.
    if (blnApplyPSlabels) {
        // TODO: Use these settings in the current Play Space (assert
        // in the current PS objects, or clear their labels?)
        _root.report( "I'll restore its labels!" );
        // Restore these Play Space settings from the input XML
        // NOTE: This is a comma-delimited list of escape'd label names, with
        // empty entries for untitled labels, for example:
        // "critical%20days,major%20causes,disruptive%20forces,,,major%20surprises,,,,"
        var labelsNode = _root.XMLSupportLib.selectSingleNode( modelNode, 'PlaySpaceSettings' );
        var arrSafeLabels = labelsNode.attributes.labels.split(',');
        var n, labelName;
        for (n = 0; n < arrSafeLabels.length; n++) {
            // Restore the name of each label to the Play Space (if any, or "")
            var labelName = unescape( arrSafeLabels[n] ); // Restore non-URL-safe characters
            if (labelName.length == 0) {
                // The Play Space uses null if no proper name
                _root.labels[n] = null;
            } else {
                _root.labels[n] = labelName;
            }
        }
    }
    if (blnApplyPSlayout) {
        // TODO: Define what exactly we're capturing here, and how to restore it..
        _root.report( "Now I'd restore its Play Space layout!" );
    }
    // Groom the incoming XML, to weed out all unused layers and objects,
    // and assign unique Play Space IDs to all nodes
    modelNode = groomIncomingModelXML( modelNode, arrIncludedLayerNodes, blnConsolidateLayers );
    // OK, now find and step through all the listed layers; merge or rename
    // them as needed, create all their child objects and relations, place
    // them in the Play Space based on stored layout *or* general rules
    var i, aLayerNode, layerID, objLayer, itsID;
    var arrIncludedObjectIDs = new Array();
    // We'll keep track of the IDs of all objects in these layers, so
    // that we can quickly load their relations
    for (i = 0; i < arrIncludedLayerNodes.length; i++) {
        aLayerNode = arrIncludedLayerNodes[i];
        if (aLayerNode.nodeName != 'Layer') {
            _root.report( "thawTemporalModel(): ERROR - missing or invalid Layer node!" );
            return "ERROR - missing or invalid Layer node!";
        } else {
            var currentLayerID = aLayerNode.attributes.ID;
            _root.report( "Analyzing layer '"+ aLayerNode.attributes.name +"' ("+ currentLayerID +")..." );
            // Reincarnate this layer into the Play Space, and grab a pointer to the new Layer object
            objLayer = _root.newLayer( aLayerNode );
            // TODO: Consolidate all this XML-to-visible-sprite stuff in a consistent manner!
            // Set the literal Flash coordinates etc in the Layer's sprites. NOTE that here
            // we support a stack of Flash sprite instances, e.g. objLayer.sprites[ 'points' ]._x
            var aSpriteName, aSpriteInstance;
            for (aSpriteName in objLayer.sprites) {
                // Read names from a keyed list, then retrieve each instance to set its props
                aSpriteInstance = objLayer.sprites[ aSpriteName ];
                aSpriteInstance._x = aLayerNode.attributes.spriteX;
                aSpriteInstance._y = aLayerNode.attributes.spriteY;
                aSpriteInstance._xscale = aLayerNode.attributes.spriteXscale;
                aSpriteInstance._yscale = aLayerNode.attributes.spriteYscale;
                aSpriteInstance._alpha = aLayerNode.attributes.spriteAlpha;
            }
        }
        // TODO: List any temporal objects belonging to this layer, and thaw
        // them into live objects
        // - If *not* applying stored labels, remove all label settings
        // - If *not* applying stored layout, place according to general rules
        // - Add its ID (as stated in the source XML) to arrIncludedObjectIDs
        // NOTE(review): this re-scans the whole <TemporalObjects> collection
        // once per layer; only nodes whose layerID matches the current layer
        // are restored, so no duplicates result.
        var objectCollectionNode = _root.XMLSupportLib.selectSingleNode( modelNode, 'TemporalObjects' );
        _root.report("@ found object collection: <"+ objectCollectionNode.nodeName +">");
        var objectNode = objectCollectionNode.firstChild;
        while (objectNode != null) {
            if (objectNode.attributes.layerID == currentLayerID) {
                _root.report( "Now I'll restore this <"+ objectNode.nodeName +">..." );
                // Yes, this object belongs to the included layer
                _root.restoreObjectIntoWorkspace( objectNode );
            }
            // step to the next object node (if any) and continue
            objectNode = objectNode.nextSibling;
        }
        /// itsID = blah; //TODO
        /// arrIncludedObjectIDs.push( itsID );
        // NOTE that these objects may have relationships with others in the
        // saved model, and there's no way to tell which order they'll load in
        //
        // TODO: Come up with a way of creating all, then linking as appropriate?
        // Perhaps we create all of them, stash in the global object registry,
        // and (optionally) create a Flash sprite for each.. THEN we keep buzzing
        // through an 'orphans' list (of objects from this model), assigning children
        // to parents until everyone's found a home (even if it's just "floating"
        // within a parent layer).
    }
    // Now let's re-unite any objects to their parents (if restored). NOTE that some
    // objects simply float within a Layer; these have been replaced in the proper
    // layer, so we're only interested in objects whose parents are something else
    // (eg. Timelines)
    //
    // We'll simply buzz through the global registry of Temporal Objects and look for
    // anything with a special-purpose 'storedParentID' attribute. If found, this should
    // be used to re-unite the object to its parent, then it should be deleted.
    /// _root.report( "Now let's re-unite orphaned objects with their parents..." );
    var childID, testChild, itsStoredParentID;
    var testID, testParent;
    for (childID in _root.objTemporalObjectRegistry) {
        testChild = _root.objTemporalObjectRegistry[childID];
        // look for a stored parent ID (found only in newly-imported
        // objects)...
        var itsStoredParentID = testChild.xmlSourceNode.attributes.parentID;
        // same treatment for stored layer ID, if any
        var itsStoredLayerID = testChild.xmlSourceNode.attributes.layerID;
        // ...and look for other IDs that link inflections to primitives
        var itsStoredSourceID = testChild.xmlSourceNode.attributes.sourceID;
        var itsStoredTargetID = testChild.xmlSourceNode.attributes.targetID;
        var testObject;
        _root.report( "Checking object '"+ childID +"' for stored links..." );
        if ((typeof(itsStoredParentID) == 'string') ||
            (typeof(itsStoredLayerID) == 'string') ||
            (typeof(itsStoredSourceID) == 'string') ||
            (typeof(itsStoredTargetID) == 'string')){
            // This object has one or more stored links! Let's find and restore them
            if (typeof(itsStoredParentID) == 'string') {
                _root.report( "Yes, '"+ childID +"' has a stored parentID '"+ itsStoredParentID +"'" );
                // try to find its stored parent
                for (testID in _root.objTemporalObjectRegistry) {
                    testObject = _root.objTemporalObjectRegistry[testID];
                    if (testObject.ID == itsStoredParentID) {
                        testObject.children.push( testChild );
                        testChild.parent = testObject;
                        break; // that's it..
                    }
                }
            }
            if (typeof(itsStoredLayerID) == 'string') {
                _root.report( "Yes, '"+ childID +"' has a stored layerID '"+ itsStoredLayerID +"'" );
                // try to find its stored Layer
                // NOTE(review): unlike the parentID case above, this loop has
                // no 'break', so every registry entry whose ID matches is
                // given the child — confirm whether that is intentional.
                for (testID in _root.objTemporalObjectRegistry) {
                    testObject = _root.objTemporalObjectRegistry[testID];
                    if (testObject.ID == itsStoredLayerID) {
                        // This time, we use a standard method
                        testObject.AddObject( testChild );
                    }
                }
            }
            if (typeof(itsStoredSourceID) == 'string') {
                _root.report( "Yes, '"+ childID +"' has a stored sourceID '"+ itsStoredSourceID +"'" );
                // try to find its stored source
                for (testID in _root.objTemporalObjectRegistry) {
                    testObject = _root.objTemporalObjectRegistry[testID];
                    _root.report("checking reg (obj '"+ testID +"'/'"+ testObject.ID +"')");
                    if (testObject.ID == itsStoredSourceID) {
                        testObject.inflections.push( testChild );
                        testChild.source = testObject;
                        _root.report("| found the parent!");
                        _root.report("| parent now has '"+ testObject.inflections.length +"' inflections");
                        // NOTE(review): 'children.length - 1' below looks like it
                        // should be 'inflections.length - 1' (we just pushed onto
                        // inflections); it only affects this report string.
                        _root.report("| new inflection is '"+ testObject.inflections[testObject.children.length - 1].ID +"'");
                        _root.report("| new source is '"+ testChild.source.ID +"'");
                    }
                }
            }
            if (typeof(itsStoredTargetID) == 'string') {
                _root.report( "Yes, '"+ childID +"' has a stored targetID '"+ itsStoredTargetID +"'" );
                // try to find its stored target
                for (testID in _root.objTemporalObjectRegistry) {
                    testObject = _root.objTemporalObjectRegistry[testID];
                    if (testObject.ID == itsStoredTargetID) {
                        testObject.inflections.push( testChild );
                        testChild.target = testObject;
                    }
                }
            }
            // TODO: Force it to adopt its stored position immediately?
            /// testChild.sprite.refreshDisplay();
        }
    }
    // Now clean up the objects in the registry of any storageIDs, to avoid collisions with
    // future loaded models
    // NOTE(review): 'objID' and 'anObject' are not declared with 'var' here,
    // so they leak as globals — harmless in AS2 but worth tightening.
    for (objID in _root.objTemporalObjectRegistry) {
        anObject = _root.objTemporalObjectRegistry[objID];
        delete anObject.xmlSourceNode;
    }
    return "OK";
}
/* Return true if 'targetItem' occurs as a value anywhere in 'testList'
 * (an Array, or a plain Object used as a namespace); false otherwise.
 * Anything that is neither is reported and treated as "not found". */
function itemFoundInList( targetItem, testList ) {
    // Guard: reject non-list arguments up front
    if (!((testList instanceof Array) || (testList instanceof Object))) {
        _root.report( "itemFoundInList(): Expected an Array or Object, not <"+ typeof(testList) +">" );
        return false;
    }
    // for..in yields property names (or index strings for Arrays)
    for (var key in testList) {
        if (testList[key] == targetItem) {
            return true; // found a match
        }
    }
    return false; // exhausted the list without a match
}
// Capture all the specified objects (and their layers), as well as general
// PlaySpace settings, and return an XMLDOM. This model might be pushed to
// the server for storage, or copied to the clipboard.
//
// Parameters:
//   arrSelectedObjects  - objects (and/or PlaySpaceLayers) chosen for capture
//   blnIncludePSlabels  - include Play Space label settings (default true)
//   blnIncludePSlayout  - include literal sprite position/scale (default true)
// Returns: an XML object rooted at <TemporalModel>, or — on a missing-sprite
// error — an error *string* (NOTE(review): callers must handle both types).
function freezeTemporalModel( arrSelectedObjects, blnIncludePSlabels, blnIncludePSlayout ) {
    /*
     * If a listed object is itself a PlaySpaceLayer, then grab all its
     * contents just as before.
     *
     * If it's another class of object, find its PSLayer and add it (if
     * it's not there already) and the selected object.
     *
     * WATCH FOR DUPLICATES, especially if both an object and its PSLayer
     * were chosen for inclusion.
     */
    // If 'blnIncludePSlabels' is false, don't include label settings and
    // strip them from all included primitives
    if (blnIncludePSlabels == null)
        blnIncludePSlabels = true; // true by default
    // If 'blnIncludePSlayout' is false, remove all references to PS layout
    // (position and scale of primitives) and rely on general layout rules to
    // organize these objects when loaded into the Play Space
    if (blnIncludePSlayout == null)
        blnIncludePSlayout = true; // true by default
    // Create a new XMLDOM to hold our model data
    var xmlOutput = new XML( );
    xmlOutput.ignoreWhite = true;
    // Create and attach the main <TemporalModel> node
    var modelNode = xmlOutput.createElement( 'TemporalModel' );
    xmlOutput.appendChild( modelNode );
    // Now create and attach other major nodes
    // Create a settings node and attach core attributes
    var settingsNode = xmlOutput.createElement( 'PlaySpaceSettings' );
    modelNode.appendChild( settingsNode );
    // Always add the Play Space version (might determine data compatibility)
    settingsNode.attributes.psVersion = "??"; // TODO: Get from main PS movie?
    settingsNode.attributes.nowSliderActive = "true"; // TODO: Get real value
    settingsNode.attributes.nowSliderStyle = "1";
    // TODO: Get real value (perhaps a more descriptive string?)
    // Create and attach the main "collection" nodes
    var layersCollectionNode = xmlOutput.createElement( 'LayerCollection' );
    modelNode.appendChild( layersCollectionNode );
    var objectsCollectionNode = xmlOutput.createElement( 'TemporalObjects' );
    modelNode.appendChild( objectsCollectionNode );
    if (blnIncludePSlabels) {
        // Save these Play Space settings in the output XML
        // NOTE: This is a comma-delimited list of escape'd label names, with
        // empty entries for untitled labels, for example:
        // "critical%20days,major%20causes,disruptive%20forces,,,major%20surprises,,,,"
        var n, labelName, safeName, strLabelList;
        var arrLabelNames = new Array();
        for (n = 0; n < _root.labels.length; n++) { // TODO: Fill in the proper source here!
            // Add the name of each label in the Play Space (if any, or "")
            labelName = _root.labels[n];
            // load from the list; save an empty string if it's null!
            if (labelName == null) {
                safeName = "";
            } else { // encode for safe storage
                safeName = escape( labelName );
            }
            arrLabelNames.push( safeName );
        }
        // Concatenate the layer names into a single string
        strLabelList = arrLabelNames.join(",");
        settingsNode.attributes.labels = strLabelList;
    }
    // Build a collection of all the layers we'll need
    var arrLayers = new Array();
    var i, testObject, itsLayer;
    for (i in arrSelectedObjects) { // an iterator
        testObject = arrSelectedObjects[i];
        var strReport = _root.DataLib.itemize( testObject );
        _root.report( "Testing selected objects for this model..." );
        if (testObject.className == 'PlaySpaceLayer') {
            _root.report( " It's a PlaySpaceLayer!" );
            // they selected a PSLayer, so add it
            arrLayers.push( testObject );
        } else if (testObject.layerData) {
            _root.report( " It's a PSLayer ENTRY!" );
            // it's a PSLayer entry, incl. "meta-data" about the layer;
            // let's extract the PSLayer object itself
            arrLayers.push( testObject.layerData );
        } else {
            _root.report( " It's some kind of temporal object: '"+ testObject.className +"'");
            // it's another kind of object, find and add its Layer
            itsLayer = testObject.layer;
            arrLayers.push( itsLayer );
        }
    }
    // OK, now step through all the specified layer objects; capture
    // needed information about all their child objects (or just the
    // selected ones), and record it all as XML nodes. Store labels and
    // layout info too, if requested
    var layerID, objLayerEntry, objLayer, tempNode, childCount, c, objChild;
    var arrIncludedObjects = new Array();
    // We'll keep track of all objects in these layers, so that
    // we can quickly test for any dependencies before saving..
    for (i in arrLayers) { // increments
        objLayer = arrLayers[i]; // the actual PlaySpaceLayer object
        // Add a <layer> node for each included layer, with its name and description
        // Assume that their node order corresponds to Z-depth (back to front)!
        tempNode = xmlOutput.createElement( 'Layer' );
        layersCollectionNode.appendChild( tempNode );
        ///var strReport = _root.DataLib.itemize( objLayer );
        ///_root.report( "About the PlaySpaceLayer object:\n"+ strReport );
        // Let's add layer attributes in reverse order, Flash will flip 'em
        // first, some particulars about position, magnification
        // these are literal Flash coordinates etc from the object's sprite
        // NOTE: Each layer owns a stack of Flash sprite-instances; let's just read
        // from one member of this object/array (since all share common properties)
        var testSprite = objLayer.sprites[ 'points' ];
        if (!testSprite) {
            // NOTE(review): this uses _root.WindowLib.report (elsewhere it's
            // _root.report) and returns a string although the normal return
            // type is an XML object — see the header note above.
            _root.WindowLib.report( "freezeToTemporalModel(): ERROR - missing Layer sprite 'points'!" );
            return "ERROR - missing Layer sprite 'points'!";
        }
        tempNode.attributes.spriteAlpha = testSprite._alpha;
        tempNode.attributes.spriteYscale = testSprite._yscale;
        tempNode.attributes.spriteXscale = testSprite._xscale;
        tempNode.attributes.spriteY = testSprite._y;
        tempNode.attributes.spriteX = testSprite._x;
        // TODO: The PlaySpaceLayer's props (should be more abstract, where 1.0 is
        // the width of the current display area)
        // add the "granularity zoom" for Timelines, Intervals, etc. in this Layer
        tempNode.attributes.zoomLevel = _root.getMetadataForLayer( objLayer ).zoomLevel;
        // and now the basics
        tempNode.attributes.description = objLayer.description;
        tempNode.attributes.name = objLayer.name;
        tempNode.attributes.ID = objLayer.ID;
        if ((itemFoundInList( objLayer, arrSelectedObjects )) ||
            (itemFoundInList( _root.getMetadataForLayer(objLayer), arrSelectedObjects ))) {
            // add *all* objects in this layer to our included objects
            addLayerContents( objLayer, arrIncludedObjects );
        }
        // NOTE that ultimately, all the objects in this layer might be
        // rejected for some reason. Let's play it safe and keep the Layer
        // anyway, since its own properties might be important to this user
    }
    // now add the non-Layer selections (but watch for duplicates!)
    for (i in arrSelectedObjects) {
        testObject = arrSelectedObjects[i];
        if (testObject.className != 'PlaySpaceLayer') {
            // it's a candidate, make sure it's not already included
            if (!itemFoundInList( testObject, arrIncludedObjects )) {
                // it's a new one, add it now
                arrIncludedObjects.push( testObject );
            }
        }
    }
    var strReport = _root.DataLib.itemize( arrIncludedObjects, "*" );
    _root.report( "About the included objects:\n"+ strReport );
    // Test each of the included objects, to see if we should add it to the
    // <TemporalObjects> node. (Some classes won't be saved if they fail
    // certain tests.)
    var objectCount = arrIncludedObjects.length;
    var c, objChild, itsSprite;
    var includedInSavedModel;
    for (c = 0; c < objectCount; c++) {
        objChild = arrIncludedObjects[c];
        /* Some classes (e.g. inflections) have additional requirements to
         * be saved. Test here for dependencies and, if some requirement
         * isn't being met, skip to the next object.
         */
        includedInSavedModel = true;
        switch (objChild.className) {
            case 'Special':
            case 'Cancelled':
                // These inflections should have a single target object in our chosen layers
                if (!itemFoundInList( objChild.target, arrIncludedObjects )) {
                    // it's not being saved!
                    includedInSavedModel = false;
                }
                break;
            case 'Causality':
                // This inflection should have a source and target object; both must be
                // included in our chosen layers for this to save!
                if (!itemFoundInList( objChild.source, arrIncludedObjects )) {
                    // it's not being saved!
                    includedInSavedModel = false;
                }
                if (!itemFoundInList( objChild.target, arrIncludedObjects )) {
                    // it's not being saved!
                    includedInSavedModel = false;
                }
                break;
            default:
                // assume that the remaining classes are straightforward (save 'em)
                break;
        }
        // If we're not saving this object, skip to the next one
        if (!includedInSavedModel) {
            _root.report( "Not saving this '"+ objChild.className +"' -- missing dependencies!");
            continue;
        }
        // Still here? Then we're saving this object's data..
        // Add a node for this object, using the className as its node-name
        tempNode = xmlOutput.createElement( objChild.className ); // eg. <Point>
        objectsCollectionNode.appendChild( tempNode );
        // Grab its sprite (movie clip instance) for additional properties
        // NOTE(review): 'childSprite' is assigned without 'var', so it leaks
        // as a global — harmless in AS2 but worth tightening.
        childSprite = objChild.sprite;
        // Populate the new node with attributes. Note that we'll add them
        // "backwards" (class-specific attributes first, then the
        // generic/core attributes) for more legible XML
        switch (objChild.className) {
            case 'Axis':
                // store position, size, length (visual), duration (time), scale and granularity
                // these are literal Flash coordinates etc from the object's sprite
                tempNode.attributes.spriteWidth = childSprite.spriteWidth; // = endCap._x;
                tempNode.attributes.spriteY = childSprite._y;
                tempNode.attributes.spriteX = childSprite._x;
                // TEST the object's props (should be more abstract, where 1.0 is
                // the width of the current display area)
                tempNode.attributes.totalDuration = objChild.totalDuration;
                tempNode.attributes.displayLength = objChild.displayLength;
                tempNode.attributes.displayY = objChild.displayY;
                tempNode.attributes.displayX = objChild.displayX;
                ///tempNode.attributes.duration // currently implied?
                ///tempNode.attributes.zoom // inherits from parent Layer?
                ///tempNode.attributes.timeScale = objChild.
                ///tempNode.attributes.granularity = objChild.
                break;
            case 'Event':
                // store position (on parent axis? or screen?), length (visual), duration (time),
                // unary inflections
                tempNode.attributes.spriteWidth = childSprite.spriteWidth; // = endCap._x;
                tempNode.attributes.spriteY = childSprite._y;
                tempNode.attributes.spriteX = childSprite._x;
                tempNode.attributes.startTime = objChild.startTime;
                tempNode.attributes.endTime = objChild.endTime;
                break;
            case 'Interval':
                // store position (on parent axis? or screen?), length (visual), duration (time),
                // unary inflections, "end conditions" (definite start? fuzzy end?)
                tempNode.attributes.spriteWidth = childSprite.spriteWidth; // = endCap._x;
                tempNode.attributes.spriteY = childSprite._y;
                tempNode.attributes.spriteX = childSprite._x;
                tempNode.attributes.startTime = objChild.startTime;
                tempNode.attributes.endTime = objChild.endTime;
                break;
            case 'Instant':
                // store position (on parent axis? or screen?), unary inflections
                tempNode.attributes.spriteY = childSprite._y;
                tempNode.attributes.spriteX = childSprite._x;
                tempNode.attributes.position = objChild.position;
                break;
            case 'Special':
            case 'Cancelled':
                // Store the ID of its target object, plus description etc
                tempNode.attributes.targetID = objChild.target.ID;
                break;
            case 'Causality':
                // Store IDs of source and target object, strength, etc
                tempNode.attributes.sourceID = objChild.source.ID;
                tempNode.attributes.targetID = objChild.target.ID;
                tempNode.attributes.strength = objChild.strength;
                // stash the X and Y position of its label (dictates curve
                // of the arrow's arc)
                tempNode.attributes.labelX = objChild.sprite.slidingLabel._x;
                tempNode.attributes.labelY = objChild.sprite.slidingLabel._y;
                break;
        }
        /* Test for common-but-not-universal properties next */
        // Add IDs of this object's children, if any
        if (objChild.children) {
            var cc, subChild, strChildrenIDs;
            var arrChildrenIDs = new Array();
            /// _root.report("testing "+ objChild.children.length +" children of '"+ objChild.ID +"'...");
            // add IDs of any children in a composite attribute
            for (cc = 0; cc < objChild.children.length; cc++) {
                subChild = objChild.children[cc];
                _root.report("found a "+ subChild.className +" called '"+ subChild.ID +"'");
                // Test each child, to see if it's along for the ride
                if (itemFoundInList( subChild, arrIncludedObjects ))
                    arrChildrenIDs.push( subChild.ID );
            }
            strChildrenIDs = arrChildrenIDs.join(",");
            tempNode.attributes.childrenIDs = strChildrenIDs;
        }
        // Add IDs of this object's inflections, if any
        if (objChild.inflections) {
            var cc, subChild, strChildrenIDs;
            var arrChildrenIDs = new Array();
            /// _root.report("testing "+ objChild.inflections.length +" inflections of '"+ objChild.ID +"'...");
            // add IDs of any inflections in a composite attribute
            for (cc = 0; cc < objChild.inflections.length; cc++) {
                subChild = objChild.inflections[cc];
                _root.report("found a "+ subChild.className +" called '"+ subChild.ID +"'");
                // Test each inflection, to see if it's along for the ride
                if (itemFoundInList( subChild, arrIncludedObjects ))
                    arrChildrenIDs.push( subChild.ID );
            }
            strChildrenIDs = arrChildrenIDs.join(",");
            tempNode.attributes.inflectionIDs = strChildrenIDs;
        }
        // TODO: Add 'contentsIDs' (if .contents), for Layer and
        // Encompassing Mood
        /* Add "core" (universal) properties last (they'll appear first in
         * the resulting XML)
         */
        // Include label index (list position? or name? both for now..) and strength
        if (objChild.labelIndex == null) { // this object has no label
            tempNode.attributes.labelIndex = "";
            tempNode.attributes.labelName = "";
            tempNode.attributes.labelStrength = "";
        } else {
            tempNode.attributes.labelIndex = objChild.labelIndex;
            tempNode.attributes.labelName = _root.labels[ objChild.labelIndex ];
            tempNode.attributes.labelStrength = objChild.labelStrength;
        }
        // Test the parent object, to see if it's along for the ride
        if (objChild.parent != undefined) { // don't assume there's a parent!
            if (itemFoundInList( objChild.parent, arrIncludedObjects )) {
                tempNode.attributes.parentID = objChild.parent.ID;
            } else {
                // it's not included; use the layer ID as parent
                tempNode.attributes.parentID = objChild.getLayer().ID;
            }
        }
        tempNode.attributes.layerID = objChild.layer.ID;
        tempNode.attributes.description = objChild.description;
        tempNode.attributes.name = objChild.name;
        tempNode.attributes.ID = objChild.ID;
    }
    return xmlOutput;
}
/* Recursively collect every descendant of 'objTarget' (depth-first,
 * pre-order: each child is appended before its own children) into
 * 'arrDescendants', and return that same array. */
function getAllDescendants( objTarget, arrDescendants ) {
    var kids = objTarget.children;
    if (kids) {
        // cache the count once; pushing onto arrDescendants can't affect it
        var kidCount = kids.length;
        for (var idx = 0; idx < kidCount; idx++) {
            var kid = kids[idx];
            arrDescendants.push( kid );
            // descend so grandchildren (and deeper) are captured too
            arrDescendants = getAllDescendants( kid, arrDescendants );
        }
    }
    // leaf objects fall through and return the array unchanged
    return arrDescendants;
}
// Append to 'arrObjects' every non-Layer object in the global temporal-
// object registry (_root.objTemporalObjectRegistry) whose .layer is the
// given 'objLayer'. Modifies 'arrObjects' in place; no return value.
//
// NOTE that we're not watching for duplicates here, or for special
// dependencies that a class might impose. Just adding to a list —
// callers (e.g. freezeTemporalModel) are expected to filter afterwards.
function addLayerContents( objLayer, arrObjects ) {
    var registry = _root.objTemporalObjectRegistry;
    // (The original also declared i, testInflection, j, alreadyStored,
    // all unused — removed.)
    var anID, anObject, itsLayer;
    for (anID in registry) {
        // test each object... does its layer match?
        anObject = registry[ anID ];
        if (anObject.className != 'PlaySpaceLayer') {
            // skip the Layer objects, add all others
            itsLayer = anObject.layer;
            if (itsLayer == objLayer) {
                // it belongs to the specified layer! add it now
                arrObjects.push( anObject );
            }
        }
    }
    // No need for a return value, since we're modifying an existing array
    return;
}
function groomIncomingModelXML( modelNode, arrIncludedLayerNodes, blnConsolidateLayers ) {
    /* Refine the XML of an incoming model (one about to be introduced
     * into the Play Space):
     *   1. strip out any Layer nodes (and their objects) not listed in
     *      arrIncludedLayerNodes;
     *   2. give every surviving layer and object a fresh Play Space ID
     *      (re-using the ID of a same-named Play Space layer when
     *      blnConsolidateLayers is true);
     *   3. remap all ID-bearing link attributes to the new IDs, culling
     *      inflections whose source/target did not survive.
     * Returns the groomed modelNode, or an error string if the expected
     * collection nodes are missing.
     */
    var layerCollectionNode = _root.XMLSupportLib.selectSingleNode( modelNode, 'LayerCollection' );
    if (layerCollectionNode.nodeName != 'LayerCollection') {
        _root.report( "groomIncomingModelXML(): <LayerCollection> node not found!" );
        return "groomIncomingModelXML(): <LayerCollection> node not found!";
    }
    var objectCollectionNode = _root.XMLSupportLib.selectSingleNode( modelNode, 'TemporalObjects' );
    if (objectCollectionNode.nodeName != 'TemporalObjects') {
        _root.report( "groomIncomingModelXML(): <TemporalObjects> node not found!" );
        return "groomIncomingModelXML(): <TemporalObjects> node not found!";
    }
    // Remove any unwanted Layer nodes, and their associated object nodes
    var layerNode, doomedLayerNode, doomedLayerID, objectNode, doomedObjectNode, doomedNode;
    layerNode = layerCollectionNode.firstChild;
    while (layerNode != null) {
        if (!itemFoundInList( layerNode, arrIncludedLayerNodes )) {
            // this is not an included layer; delete it and its children!
            doomedLayerNode = layerNode;
            doomedLayerID = doomedLayerNode.attributes.ID;
            _root.report( "Removing unwanted Layer '"+ doomedLayerID +"'..." );
            // find and remove all objects in this layer
            objectNode = objectCollectionNode.firstChild;
            while (objectNode != null) {
                // BUGFIX: this used to compare against 'testLayerID', a
                // variable that is never defined; the doomed layer's own
                // ID is the correct test value.
                if (objectNode.attributes.layerID == doomedLayerID) {
                    // Yes, this object belongs to the doomed layer;
                    // step to the next object, then delete this one
                    doomedObjectNode = objectNode;
                    objectNode = objectNode.nextSibling;
                    doomedObjectNode.removeNode();
                } else {
                    // step to the next object node (if any) and continue
                    objectNode = objectNode.nextSibling;
                }
            }
            // step to the next layer, then delete this one
            layerNode = layerNode.nextSibling;
            // BUGFIX: was 'doomedNode.removeNode()' on a variable that was
            // never assigned, so unwanted layer nodes were never removed.
            doomedLayerNode.removeNode();
        } else {
            // this is an included layer, just step to the next one
            _root.report( "Keeping Layer '"+ layerNode.attributes.ID +"'..." );
            layerNode = layerNode.nextSibling;
        }
    }
    // Now we have a smaller model, just the selected layers and their
    // children
    _root.report( "Here's the leaner, meaner model:\n"+ modelNode.toString() );
    // Assign unique Play Space IDs to all objects in the incoming model,
    // except for any layers that we're consolidating (with same-named
    // layers currently in the Play Space)
    var arrOldIDs = new Array();
    var arrNewIDs = new Array();
    var nextAvailableID;
    // Sweep through the layers, then the objects; assign a new ID to
    // each, recording old/new pairs (same index in both arrays) for the
    // link remapping below.
    var storedLayerName, p, testPSlayer;
    layerNode = layerCollectionNode.firstChild;
    while (layerNode != null) {
        nextAvailableID = null;
        if (blnConsolidateLayers) {
            // attempt to find a matching (same-named) layer in the Play
            // Space, and use its ID for this layer
            storedLayerName = layerNode.attributes.name;
            for (p in _root.arrLoadedLayers) { // iterator
                testPSlayer = _root.arrLoadedLayers[p];
                // Does this existing Play Space layer have the same name
                // as our incoming layer?
                if (storedLayerName == testPSlayer.name) {
                    // it's a match! use this ID for objects in this layer
                    nextAvailableID = testPSlayer.ID;
                    break; // use the first matching layer name
                }
            }
        }
        if (!nextAvailableID) {
            // no match found, give it a new ID (e.g. "PlaySpaceLayer_3")
            nextAvailableID = "PlaySpaceLayer"+ _root._generic_getUniqueID( );
        }
        // make matching additions (n-th place) in both ID arrays
        arrOldIDs.push( layerNode.attributes.ID );
        arrNewIDs.push( nextAvailableID );
        // reset the node's ID to the new one
        layerNode.attributes.ID = nextAvailableID;
        // step to the next layer (nothing is deleted in this pass)
        layerNode = layerNode.nextSibling;
    }
    // Re-assign all object IDs
    objectNode = objectCollectionNode.firstChild;
    while (objectNode != null) {
        // New Play Space ID based on the node's class (e.g. "Point_23")
        nextAvailableID = objectNode.nodeName + _root._generic_getUniqueID( );
        // make matching additions (n-th place) in both ID arrays
        arrOldIDs.push( objectNode.attributes.ID );
        arrNewIDs.push( nextAvailableID );
        // reset the node's ID, then step to the next object
        objectNode.attributes.ID = nextAvailableID;
        objectNode = objectNode.nextSibling;
    }
    // OK, now we should have two matched arrays, with old and new IDs
    _root.report( "Again, with new core IDs:\n"+ modelNode.toString() );
    // Now check relevant links in all objects; reset IDs to their new
    // counterparts, or respond if the linked ID isn't among the incoming
    // objects; either cull this object, or let its link(s) dangle
    objectNode = objectCollectionNode.firstChild;
    while (objectNode != null) {
        // Test for link properties (for each class of object), and remap
        // them all to our list of new IDs
        switch (objectNode.nodeName) {
        case 'Instant':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['parentID', 'layerID', 'childrenIDs', 'inflectionIDs']
            );
            // step to the next object in any case
            objectNode = objectNode.nextSibling;
            break;
        case 'Event':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['parentID', 'layerID', 'inflectionIDs']
            );
            objectNode = objectNode.nextSibling;
            break;
        case 'Interval':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['parentID', 'layerID', 'inflectionIDs']
            );
            objectNode = objectNode.nextSibling;
            break;
        case 'Axis':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['parentID', 'layerID', 'childrenIDs', 'inflectionIDs']
            );
            objectNode = objectNode.nextSibling;
            break;
        case 'Special':
        case 'Cancelled':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['targetID', 'layerID']
            );
            // Cull out this inflection if its target object isn't loaded
            if (objectNode.attributes.targetID == "") {
                doomedNode = objectNode;
                objectNode = objectNode.nextSibling;
                doomedNode.removeNode();
            } else {
                // step to the next object
                objectNode = objectNode.nextSibling;
            }
            break;
        case 'Causality':
            remapToNewIDs(
                objectNode,
                arrOldIDs,
                arrNewIDs,
                ['sourceID', 'targetID', 'layerID']
            );
            // Cull out this inflection if its source or target objects
            // aren't loaded
            if ((objectNode.attributes.sourceID == "") ||
                (objectNode.attributes.targetID == "")) {
                doomedNode = objectNode;
                objectNode = objectNode.nextSibling;
                doomedNode.removeNode();
            } else {
                // step to the next object
                objectNode = objectNode.nextSibling;
            }
            break;
        default:
            // BUGFIX: the original never advanced past an unknown node,
            // looping forever; it also reported '.className', which XML
            // nodes lack -- '.nodeName' is what this switch dispatches on.
            _root.report("groomIncomingModelXML(): Unknown object class <"+ objectNode.nodeName +">!");
            objectNode = objectNode.nextSibling;
        }
    }
    _root.report( "Once more, with all new IDs:\n"+ modelNode.toString() );
    return modelNode;
}
// Here's a support function that remaps from old to new IDs
// Support function that remaps ID-bearing attributes from old to new IDs.
function remapToNewIDs( objectNode, arrOldIDs, arrNewIDs, arrIDAttributes ) {
    // For each listed attribute, translate every comma-separated old ID
    // into its counterpart at the same index in the new-ID array. Old IDs
    // with no match are silently omitted, so the resulting attribute is a
    // single ID, a comma-delimited series, or an empty string.
    for (var a in arrIDAttributes) {
        var attrName = arrIDAttributes[a];
        // Split into an array (in case the attribute holds several IDs).
        var oldList = objectNode.attributes[ attrName ].split(',');
        var newList = new Array();
        for (var b in oldList) {
            var candidate = oldList[b];
            // Look for this value among the old IDs.
            for (var c in arrOldIDs) {
                if (arrOldIDs[c] == candidate) {
                    // Match at position c: take the new ID at that spot.
                    _root.report("### replacing old value '"+ candidate +"' with '"+ arrNewIDs[c] +"'");
                    newList.push( arrNewIDs[c] );
                    break; // abandon inner loop, proceed to next value
                }
            }
            // No match found? The value is simply dropped.
        }
        var joined = newList.join(',');
        _root.report( " new value for '"+ attrName +"' is '"+ joined +"'");
        objectNode.attributes[ attrName ] = joined;
    }
}
trace("ModelWranglers loaded successfully");
|
package com.platform.entity;
import java.util.Date;
/**
 * View object carrying a user-info entry for display: the owning user's
 * identity (id, name, nickname, avatar), free-form info text and the
 * creation timestamp.
 */
public class InfoVo implements java.io.Serializable {
    // FIX: serialVersionUID was declared without the class implementing
    // Serializable, making the field dead; the interface is now declared
    // (fully qualified, so no new import is needed).
    private static final long serialVersionUID = 1L;
    /** Primary key of this entry. */
    private Integer id;
    /** ID of the owning user. */
    private long userId;
    /** Creation timestamp. */
    private Date createTime;
    /** Free-form info text. */
    private String info;
    /** Display name. */
    private String name;
    /** Avatar image reference. */
    private String avatar;
    /** User's nickname. */
    private String nickName;
    /** User's login name. */
    private String userName;

    public String getUserName() {
        return userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    public String getAvatar() {
        return avatar;
    }

    public void setAvatar(String avatar) {
        this.avatar = avatar;
    }

    public String getNickName() {
        return nickName;
    }

    public void setNickName(String nickName) {
        this.nickName = nickName;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public Integer getId() {
        return id;
    }

    public long getUserId() {
        return userId;
    }

    public void setUserId(long userId) {
        this.userId = userId;
    }

    public void setCreateTime(Date createTime) {
        this.createTime = createTime;
    }

    public Date getCreateTime() {
        return createTime;
    }

    public void setInfo(String info) {
        this.info = info;
    }

    public String getInfo() {
        return info;
    }
}
|
/*
* Copyright 2015-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.reactive.routing;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import org.onlab.packet.Ethernet;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.onosproject.core.ApplicationId;
import org.onosproject.net.FilteredConnectPoint;
import org.onosproject.net.intf.Interface;
import org.onosproject.net.intf.InterfaceService;
import org.onosproject.intentsync.IntentSynchronizationService;
import org.onosproject.net.ConnectPoint;
import org.onosproject.net.Host;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.host.HostService;
import org.onosproject.net.intent.Constraint;
import org.onosproject.net.intent.Key;
import org.onosproject.net.intent.MultiPointToSinglePointIntent;
import org.onosproject.net.intent.constraint.PartialFailureConstraint;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * FIB component for reactive routing intents.
 *
 * <p>Builds and submits {@link MultiPointToSinglePointIntent}s on demand
 * (per packet-in) for three connectivity cases: Internet-to-host,
 * host-to-Internet and host-to-host, keeping a map of installed intents
 * keyed by destination prefix so they can be updated in place.</p>
 */
public class ReactiveRoutingFib implements IntentRequestListener {

    private static final int PRIORITY_OFFSET = 100;
    private static final int PRIORITY_MULTIPLIER = 5;
    protected static final ImmutableList<Constraint> CONSTRAINTS
            = ImmutableList.of(new PartialFailureConstraint());

    private final Logger log = LoggerFactory.getLogger(getClass());

    private final ApplicationId appId;
    private final HostService hostService;
    private final InterfaceService interfaceService;
    private final IntentSynchronizationService intentSynchronizer;

    // Reactive intents installed so far, keyed by destination IP prefix.
    private final Map<IpPrefix, MultiPointToSinglePointIntent> routeIntents;

    /**
     * Class constructor.
     *
     * @param appId application ID to use to generate intents
     * @param hostService host service
     * @param interfaceService interface service
     * @param intentSynchronizer intent synchronization service
     */
    public ReactiveRoutingFib(ApplicationId appId, HostService hostService,
                              InterfaceService interfaceService,
                              IntentSynchronizationService intentSynchronizer) {
        this.appId = appId;
        this.hostService = hostService;
        this.interfaceService = interfaceService;
        this.intentSynchronizer = intentSynchronizer;

        routeIntents = Maps.newConcurrentMap();
    }

    /**
     * Sets up connectivity from all edge interfaces towards the internal
     * host owning the given IP address. If the host's MAC is not yet known,
     * starts host monitoring and returns without installing an intent.
     *
     * @param hostIpAddress IP address of the destination host
     */
    @Override
    public void setUpConnectivityInternetToHost(IpAddress hostIpAddress) {
        checkNotNull(hostIpAddress);
        TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
        if (hostIpAddress.isIp4()) {
            selector.matchEthType(Ethernet.TYPE_IPV4);
        } else {
            selector.matchEthType(Ethernet.TYPE_IPV6);
        }

        // Match the destination IP prefix at the first hop
        IpPrefix ipPrefix = hostIpAddress.toIpPrefix();
        selector.matchIPDst(ipPrefix);

        // Rewrite the destination MAC address
        MacAddress hostMac = null;
        FilteredConnectPoint egressPoint = null;
        for (Host host : hostService.getHostsByIp(hostIpAddress)) {
            if (host.mac() != null) {
                hostMac = host.mac();
                egressPoint = new FilteredConnectPoint(host.location());
                break;
            }
        }
        if (hostMac == null) {
            // MAC unknown: monitor the IP so a later host event can retry.
            hostService.startMonitoringIp(hostIpAddress);
            return;
        }

        TrafficTreatment.Builder treatment =
                DefaultTrafficTreatment.builder().setEthDst(hostMac);
        Key key = Key.of(ipPrefix.toString(), appId);
        int priority = ipPrefix.prefixLength() * PRIORITY_MULTIPLIER
                + PRIORITY_OFFSET;

        Set<ConnectPoint> interfaceConnectPoints =
                interfaceService.getInterfaces().stream()
                        .map(intf -> intf.connectPoint()).collect(Collectors.toSet());

        if (interfaceConnectPoints.isEmpty()) {
            log.error("The interface connect points are empty!");
            return;
        }

        // Every edge interface except the egress becomes an ingress point.
        Set<FilteredConnectPoint> ingressPoints = new HashSet<>();
        for (ConnectPoint connectPoint : interfaceConnectPoints) {
            if (!connectPoint.equals(egressPoint.connectPoint())) {
                ingressPoints.add(new FilteredConnectPoint(connectPoint));
            }
        }

        MultiPointToSinglePointIntent intent =
                MultiPointToSinglePointIntent.builder()
                        .appId(appId)
                        .key(key)
                        .selector(selector.build())
                        .treatment(treatment.build())
                        .filteredIngressPoints(ingressPoints)
                        .filteredEgressPoint(egressPoint)
                        .priority(priority)
                        .constraints(CONSTRAINTS)
                        .build();

        log.trace("Generates ConnectivityInternetToHost intent {}", intent);
        submitReactiveIntent(ipPrefix, intent);
    }

    /**
     * Sets up connectivity from an internal host towards the Internet via
     * the next hop's egress interface.
     *
     * @param hostIp IP address of the internal source host
     * @param prefix destination IP prefix to route
     * @param nextHopIpAddress IP address of the next hop router
     */
    @Override
    public void setUpConnectivityHostToInternet(IpAddress hostIp, IpPrefix prefix,
                                                IpAddress nextHopIpAddress) {
        // Find the attachment point (egress interface) of the next hop
        Interface egressInterface =
                interfaceService.getMatchingInterface(nextHopIpAddress);
        if (egressInterface == null) {
            log.warn("No outgoing interface found for {}",
                    nextHopIpAddress);
            return;
        }

        Set<Host> hosts = hostService.getHostsByIp(nextHopIpAddress);
        if (hosts.isEmpty()) {
            log.warn("No host found for next hop IP address");
            return;
        }
        MacAddress nextHopMacAddress = null;
        for (Host host : hosts) {
            nextHopMacAddress = host.mac();
            break;
        }
        // FIX: the first host may report a null MAC; without this guard a
        // null would reach setEthDst() below (the other setUp* methods
        // already guard against unknown MACs).
        if (nextHopMacAddress == null) {
            log.warn("No MAC address found for next hop {}", nextHopIpAddress);
            return;
        }

        hosts = hostService.getHostsByIp(hostIp);
        if (hosts.isEmpty()) {
            log.warn("No host found for host IP address");
            return;
        }
        Host host = hosts.stream().findFirst().get();
        ConnectPoint ingressPoint = host.location();

        // Generate the intent itself
        ConnectPoint egressPort = egressInterface.connectPoint();
        log.debug("Generating intent for prefix {}, next hop mac {}",
                prefix, nextHopMacAddress);

        // Match the destination IP prefix at the first hop
        TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
        if (prefix.isIp4()) {
            selector.matchEthType(Ethernet.TYPE_IPV4);
            selector.matchIPDst(prefix);
        } else {
            selector.matchEthType(Ethernet.TYPE_IPV6);
            selector.matchIPv6Dst(prefix);
        }

        // Rewrite the destination MAC address
        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder()
                .setEthDst(nextHopMacAddress);
        if (!egressInterface.vlan().equals(VlanId.NONE)) {
            treatment.setVlanId(egressInterface.vlan());
            // If we set VLAN ID, we have to make sure a VLAN tag exists.
            // TODO support no VLAN -> VLAN routing
            selector.matchVlanId(VlanId.ANY);
        }

        int priority = prefix.prefixLength() * PRIORITY_MULTIPLIER + PRIORITY_OFFSET;
        Key key = Key.of(prefix.toString() + "-reactive", appId);
        MultiPointToSinglePointIntent intent = MultiPointToSinglePointIntent.builder()
                .appId(appId)
                .key(key)
                .selector(selector.build())
                .treatment(treatment.build())
                .filteredIngressPoints(Collections.singleton(new FilteredConnectPoint(ingressPoint)))
                .filteredEgressPoint(new FilteredConnectPoint(egressPort))
                .priority(priority)
                .constraints(CONSTRAINTS)
                .build();

        submitReactiveIntent(prefix, intent);
    }

    /**
     * Sets up bidirectional connectivity between two hosts in the local SDN
     * network: one fresh intent towards the destination, and an update (or
     * fresh intent) for the reverse direction.
     *
     * @param dstIpAddress destination host IP
     * @param srcIpAddress source host IP
     * @param srcMacAddress source host MAC
     * @param srcConnectPoint connect point where the packet-in arrived
     */
    @Override
    public void setUpConnectivityHostToHost(IpAddress dstIpAddress,
                                            IpAddress srcIpAddress,
                                            MacAddress srcMacAddress,
                                            ConnectPoint srcConnectPoint) {
        checkNotNull(dstIpAddress);
        checkNotNull(srcIpAddress);
        checkNotNull(srcMacAddress);
        checkNotNull(srcConnectPoint);

        IpPrefix srcIpPrefix = srcIpAddress.toIpPrefix();
        IpPrefix dstIpPrefix = dstIpAddress.toIpPrefix();
        ConnectPoint dstConnectPoint = null;
        MacAddress dstMacAddress = null;

        for (Host host : hostService.getHostsByIp(dstIpAddress)) {
            if (host.mac() != null) {
                dstMacAddress = host.mac();
                dstConnectPoint = host.location();
                break;
            }
        }
        if (dstMacAddress == null) {
            hostService.startMonitoringIp(dstIpAddress);
            return;
        }

        //
        // Handle intent from source host to destination host
        //
        MultiPointToSinglePointIntent srcToDstIntent =
                hostToHostIntentGenerator(dstIpAddress, dstConnectPoint,
                        dstMacAddress, srcConnectPoint);
        submitReactiveIntent(dstIpPrefix, srcToDstIntent);

        //
        // Handle intent from destination host to source host
        //

        // Since we proactively handle the intent from destination host to
        // source host, we should check whether there is an existing intent
        // first.
        if (mp2pIntentExists(srcIpPrefix)) {
            updateExistingMp2pIntent(srcIpPrefix, dstConnectPoint);
            return;
        } else {
            // There is no existing intent, create a new one.
            MultiPointToSinglePointIntent dstToSrcIntent =
                    hostToHostIntentGenerator(srcIpAddress, srcConnectPoint,
                            srcMacAddress, dstConnectPoint);
            submitReactiveIntent(srcIpPrefix, dstToSrcIntent);
        }
    }

    /**
     * Generates MultiPointToSinglePointIntent for both source host and
     * destination host located in local SDN network.
     *
     * @param dstIpAddress the destination IP address
     * @param dstConnectPoint the destination host connect point
     * @param dstMacAddress the MAC address of destination host
     * @param srcConnectPoint the connect point where packet-in from
     * @return the generated MultiPointToSinglePointIntent
     */
    private MultiPointToSinglePointIntent hostToHostIntentGenerator(
            IpAddress dstIpAddress,
            ConnectPoint dstConnectPoint,
            MacAddress dstMacAddress,
            ConnectPoint srcConnectPoint) {
        checkNotNull(dstIpAddress);
        checkNotNull(dstConnectPoint);
        checkNotNull(dstMacAddress);
        checkNotNull(srcConnectPoint);

        Set<FilteredConnectPoint> ingressPoints = new HashSet<>();
        ingressPoints.add(new FilteredConnectPoint(srcConnectPoint));
        IpPrefix dstIpPrefix = dstIpAddress.toIpPrefix();

        TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
        if (dstIpAddress.isIp4()) {
            selector.matchEthType(Ethernet.TYPE_IPV4);
            selector.matchIPDst(dstIpPrefix);
        } else {
            selector.matchEthType(Ethernet.TYPE_IPV6);
            selector.matchIPv6Dst(dstIpPrefix);
        }

        // Rewrite the destination MAC address
        TrafficTreatment.Builder treatment =
                DefaultTrafficTreatment.builder().setEthDst(dstMacAddress);

        Key key = Key.of(dstIpPrefix.toString(), appId);
        int priority = dstIpPrefix.prefixLength() * PRIORITY_MULTIPLIER
                + PRIORITY_OFFSET;
        MultiPointToSinglePointIntent intent =
                MultiPointToSinglePointIntent.builder()
                        .appId(appId)
                        .key(key)
                        .selector(selector.build())
                        .treatment(treatment.build())
                        .filteredIngressPoints(ingressPoints)
                        .filteredEgressPoint(new FilteredConnectPoint(dstConnectPoint))
                        .priority(priority)
                        .constraints(CONSTRAINTS)
                        .build();

        log.trace("Generates ConnectivityHostToHost = {} ", intent);
        return intent;
    }

    /**
     * Adds an ingress connect point to an already-installed
     * multipoint-to-single-point intent for the given prefix, resubmitting
     * the intent only when the point was actually new.
     *
     * @param ipPrefix prefix of the existing intent
     * @param ingressConnectPoint ingress point to add
     */
    @Override
    public void updateExistingMp2pIntent(IpPrefix ipPrefix,
                                         ConnectPoint ingressConnectPoint) {
        checkNotNull(ipPrefix);
        checkNotNull(ingressConnectPoint);

        MultiPointToSinglePointIntent existingIntent =
                getExistingMp2pIntent(ipPrefix);
        if (existingIntent != null) {
            Set<FilteredConnectPoint> ingressPoints = existingIntent.filteredIngressPoints();
            // Add host connect point into ingressPoints of the existing intent
            if (ingressPoints.add(new FilteredConnectPoint(ingressConnectPoint))) {
                MultiPointToSinglePointIntent updatedMp2pIntent =
                        MultiPointToSinglePointIntent.builder()
                                .appId(appId)
                                .key(existingIntent.key())
                                .selector(existingIntent.selector())
                                .treatment(existingIntent.treatment())
                                .filteredIngressPoints(ingressPoints)
                                .filteredEgressPoint(existingIntent.filteredEgressPoint())
                                .priority(existingIntent.priority())
                                .constraints(CONSTRAINTS)
                                .build();

                log.trace("Update an existing MultiPointToSinglePointIntent "
                        + "to new intent = {} ", updatedMp2pIntent);
                submitReactiveIntent(ipPrefix, updatedMp2pIntent);
            }
            // If adding ingressConnectPoint to ingressPoints failed, it
            // because between the time interval from checking existing intent
            // to generating new intent, onos updated this intent due to other
            // packet-in and the new intent also includes the
            // ingressConnectPoint. This will not affect reactive routing.
        }
    }

    /**
     * Submits a reactive intent to the intent synchronizer.
     *
     * @param ipPrefix IP prefix of the intent
     * @param intent intent to submit
     */
    void submitReactiveIntent(IpPrefix ipPrefix, MultiPointToSinglePointIntent intent) {
        routeIntents.put(ipPrefix, intent);
        intentSynchronizer.submit(intent);
    }

    /**
     * Gets the existing MultiPointToSinglePointIntent from memory for a given
     * IP prefix.
     *
     * @param ipPrefix the IP prefix used to find MultiPointToSinglePointIntent
     * @return the MultiPointToSinglePointIntent if found, otherwise null
     */
    private MultiPointToSinglePointIntent getExistingMp2pIntent(IpPrefix ipPrefix) {
        checkNotNull(ipPrefix);
        return routeIntents.get(ipPrefix);
    }

    /**
     * Tells whether a reactive intent has already been installed for a prefix.
     *
     * @param ipPrefix the IP prefix to look up
     * @return true if an intent exists for the prefix
     */
    @Override
    public boolean mp2pIntentExists(IpPrefix ipPrefix) {
        checkNotNull(ipPrefix);
        return routeIntents.get(ipPrefix) != null;
    }
}
|
<gh_stars>10-100
const { DEFINITIONS_DIR,DASHBOARD_FILE_NAME_SUFFIX,FILE_ENCODING} = require('./props');
const {readdir} = require('fs').promises;
const path = require('path');
const fs = require('fs')
const utils = require('./utils');
(async () => {
    // Walk every definition folder and sanitize each dashboard file in place.
    const folderDefinitions = await readdir(DEFINITIONS_DIR, {withFileTypes: true})
    for (let folderDefinition of folderDefinitions) {
        if (!folderDefinition.isDirectory()) continue
        const folderName = folderDefinition.name
        const files = await readdir(path.resolve(DEFINITIONS_DIR, folderName), {withFileTypes: true})
        for (let file of files) {
            if (!file.name.includes(DASHBOARD_FILE_NAME_SUFFIX)) continue
            const filePath = path.resolve(DEFINITIONS_DIR, folderName, file.name)
            const fileContent = fs.readFileSync(filePath, FILE_ENCODING)
            const newFileContent = utils.sanitizeDashboard(fileContent)
            // Only rewrite files whose content actually changed.
            if (newFileContent !== fileContent) {
                try {
                    // FIX: the callback-based fs.writeFile could still be in
                    // flight when the loop finished, and its error callback
                    // raced with process exit; write synchronously instead.
                    fs.writeFileSync(filePath, newFileContent, FILE_ENCODING)
                    console.log(`=> Sanitized ${file.name} in ${folderName}`)
                } catch (err) {
                    console.error(`Error updating ${file.name} in ${folderName}`)
                    process.exit(1)
                }
            }
        }
    }
})().catch((err) => {
    // FIX: without this handler a failed readdir became an unhandled
    // promise rejection with no useful exit status.
    console.error(err)
    process.exit(1)
})
|
<filename>web/src/pages/analysis/history.js<gh_stars>0
'use strict';
import moment from 'moment/src/moment.js';
import page from './history.html';
import $ from '/jquery.js';
import _ from 'lodash';
import * as Util from '/util.js';
import * as Api from '/moneydb.js';
import * as Common from '/common.js';
import * as c3 from 'c3';
export let location;
export let absolute;
export let ppm;
export let startDate;
export let endDate;
let chart;
let data;
let overviewData;
export function init(loc) {
    // Mount the history page into `loc` and wire up every control:
    // two date pickers, the c3 chart, and the checkbox groups for
    // interval, display type, range and grouping.
    location = $(loc);
    location.empty().html(page);
    // Default range: the past year up to today.
    startDate = moment().subtract(1, 'year');
    endDate = moment();
    // "From" date picker (Semantic UI calendar, German labels).
    $('#range-from', location).calendar({
        type: 'date',
        today: true,
        firstDayOfWeek: 1,
        formatter: {
            date: function(d) {
                return moment(d).format('LL');
            }
        },
        parser: {
            date: function(d) {
                return moment(d, 'LL').toDate();
            }
        },
        text: {
            days: ['S', 'M', 'D', 'M', 'D', 'F', 'S'],
            months: ['Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli', 'August', 'September', 'Oktober', 'November', 'Dezember'],
            monthsShort: ['Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez'],
            today: 'Heute',
            now: 'Jetzt'
        },
        onChange: function(date) {
            // Truncate to a local date (drop the time part), then redraw.
            startDate = moment(moment(date).format('L'), 'L');
            refresh();
        }
    }).calendar('set date', startDate.toDate(), true, false);
    // "To" date picker; identical configuration except it drives endDate.
    $('#range-to', location).calendar({
        type: 'date',
        today: true,
        firstDayOfWeek: 1,
        formatter: {
            date: function(d) {
                return moment(d).format('LL');
            }
        },
        parser: {
            date: function(d) {
                return moment(d, 'LL').toDate();
            }
        },
        text: {
            days: ['S', 'M', 'D', 'M', 'D', 'F', 'S'],
            months: ['Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli', 'August', 'September', 'Oktober', 'November', 'Dezember'],
            monthsShort: ['Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez'],
            today: 'Heute',
            now: 'Jetzt'
        },
        onChange: function(date) {
            endDate = moment(moment(date).format('L'), 'L');
            refresh();
        }
    }).calendar('set date', endDate.toDate(), true, false);
    // Empty time-series chart; updateChart() loads columns into it later.
    chart = c3.generate({
        bindto: $('#chart', location)[0],
        data: {
            x: 'x',
            columns: []
        },
        size: {
            height: 520
            //, width: 480
        },
        axis: {
            y: {
                tick: {
                    format: function(value) {
                        // Whole euros on the y axis.
                        return value.toFixed(0) + ' €';
                    }
                }
            },
            x: {
                type: 'timeseries',
                tick: {
                    format: function(value) {
                        return moment(value).format('L');
                    }
                }
            }
        },
        grid: {
            x: {
                show: false
            },
            y: {
                show: false
            }
        },
        tooltip: {
            format: {
                title: function(d) {
                    if (absolute)
                        return moment(d).format('LL LT');
                    else {
                        // Relative mode: show the covered period, derived
                        // from ppm (points per month) -- see refresh().
                        let period = moment(d).diff(moment(d).subtract(1, 'months')) / ppm;
                        let v2 = moment(d).subtract(period, 'milliseconds');
                        return v2.format('L LT') + ' bis ' + moment(d).format('L LT');
                    }
                },
                value: function(value) {
                    return Util.formatAmount(value);
                }
            }
        }
    });
    // Series visibility shortcuts.
    $('#show-all', location).click(() => chart.show());
    $('#hide-all', location).click(() => chart.hide());
    // Any interval / display-type / range change triggers a full refresh.
    $('#interval', location).find('.checkbox').checkbox({
        onChecked: function() {
            refresh();
        }
    });
    $('#display-type', location).find('.checkbox').checkbox({
        onChecked: function() {
            refresh();
        }
    });
    $('#range', location).find('.checkbox').checkbox({
        onChecked: function() {
            refresh();
            // The custom date inputs are only enabled for the custom range.
            if (!$('#range-custom', location).parent().checkbox('is checked'))
                $('#range .input', location).addClass('disabled');
            else
                $('#range .input', location).removeClass('disabled');
        }
    });
    $('#group-by', location).find('.checkbox').checkbox({
        onChecked: function() {
            // Regrouping only re-renders; no new data needed.
            // reuse old data (or do nothing)
            if (typeof data != 'undefined')
                updateChart(data);
        }
    });
    // Defaults: first-balance range, absolute values, grouped by
    // availability, monthly interval, custom-range inputs disabled.
    $('#range-firstbalance', location).parent().checkbox('set checked');
    $('#absolute', location).parent().checkbox('set checked');
    $('#group-by-availability', location).parent().checkbox('set checked');
    $('#range .input', location).addClass('disabled');
    $('#interval-monthly', location).parent().checkbox('set checked');
}
const updateChart = function(d) {
    // Rebuild the c3 chart from `d`, which is either the monthly overview
    // table (an array of rows, newest first) or an on-demand simulation
    // result (an object with .dates plus .byAvailability / .byAccount
    // series) -- NOTE(review): shapes inferred from the two branches
    // below; confirm against the API.
    data = d;
    let byAvail = $('#group-by-availability', location).parent().checkbox('is checked');
    let showAbsoluteValues = $('#absolute', location).parent().checkbox('is checked');
    let columns;
    let colorDict;
    if (byAvail) {
        colorDict = {};
        // Fixed availability buckets; columns[0] is the x (time) axis.
        columns = [
            ['x'],
            ['Sofort'],
            ['≤ Wochen'],
            ['≤ Monate'],
            ['≤ Jahre'],
            ['≤ Jahrzehnte']
        ];
        if (Array.isArray(data)) {
            // monthly data from overview table
            // In relative mode the iteration starts one row earlier
            // (length - 2) because each point is the difference to the
            // following (older) row, so the last row is baseline only.
            for (let i = data.length - (showAbsoluteValues ? 1 : 2); i >= 0; i--) {
                columns[0].push(moment(data[i].endDate).toDate());
                for (let j = 1; j < columns.length; j++)
                    if (showAbsoluteValues)
                        columns[j].push(data[i].endBalances.total[j - 1].amount);
                    else
                        columns[j].push(data[i].endBalances.total[j - 1].amount - data[i + 1].endBalances.total[j - 1].amount);
            }
        } else {
            // requested data
            for (let i = data.dates.length - (showAbsoluteValues ? 1 : 2); i >= 0; i--) {
                columns[0].push(moment(data.dates[i]).toDate());
                for (let j = 1; j < columns.length; j++)
                    if (showAbsoluteValues)
                        columns[j].push(data.byAvailability[j - 1].data[i]);
                    else
                        columns[j].push(data.byAvailability[j - 1].data[i] - data.byAvailability[j - 1].data[i + 1]);
            }
        }
    } else {
        // Group by account: one series per account, colored per account.
        colorDict = {};
        columns = [];
        if (Array.isArray(data)) {
            _.forEach(data[0].endBalances.balances, function(v) {
                let acc = Common.findAccount(v.accountId);
                colorDict[acc.title] = acc.color; // tinycolor(acc.color).saturate(20).toString();
                columns.push([acc.title]);
            });
            columns.unshift(['x']);
            // monthly data from overview table
            for (let i = data.length - (showAbsoluteValues ? 1 : 2); i >= 0; i--) {
                columns[0].push(moment(data[i].endDate).toDate());
                for (let j = 1; j < columns.length; j++)
                    if (showAbsoluteValues)
                        columns[j].push(data[i].endBalances.balances[j - 1].amount);
                    else
                        columns[j].push(data[i].endBalances.balances[j - 1].amount - data[i + 1].endBalances.balances[j - 1].amount);
            }
        } else {
            _.forEach(data.byAccount, function(v) {
                let acc = Common.findAccount(v.id);
                colorDict[acc.title] = acc.color; // tinycolor(acc.color).saturate(20).toString();
                columns.push([acc.title]);
            });
            columns.unshift(['x']);
            for (let i = data.dates.length - (showAbsoluteValues ? 1 : 2); i >= 0; i--) {
                columns[0].push(moment(data.dates[i]).toDate());
                for (let j = 1; j < columns.length; j++)
                    if (showAbsoluteValues)
                        columns[j].push(data.byAccount[j - 1].data[i]);
                    else
                        columns[j].push(data.byAccount[j - 1].data[i] - data.byAccount[j - 1].data[i + 1]);
            }
        }
    }
    let firstLoad = chart.data().length == 0;
    // Remember the display mode for the tooltip formatter in init().
    absolute = showAbsoluteValues;
    chart.load({
        x: 'x',
        columns: columns,
        unload: true,
        done: function() {
            // On the very first load, hide the mid-term buckets by default.
            if (firstLoad && byAvail)
                chart.hide(['≤ Wochen', '≤ Monate', '≤ Jahre']);
        },
        // type: showAbsoluteValues ? (columns[0].length > 100 ? 'area' : 'area-spline') : 'bar',
        type: columns[0].length > 100 ? 'area' : 'area-spline',
        colors: colorDict,
        groups: []
    });
};
export function refresh(ovData) {
    // Redraw the chart: either reuse the cached monthly overview data or
    // request freshly computed balances from the API, depending on the
    // selected interval and range controls.
    if (typeof ovData !== 'undefined')
        overviewData = ovData;
    // FIX: the original tested `typeof overviewData !== undefined`, which
    // compares a string against the *value* undefined and is always true,
    // so the monthly branch could run with overviewData still undefined.
    if (typeof overviewData !== 'undefined' && $('#interval-monthly', location).parent().checkbox('is checked')) {
        ppm = 1;
        if ($('#range-firstexpense', location).parent().checkbox('is checked')) {
            // Trim trailing rows (oldest side) that contain no expenses.
            let length = overviewData.length;
            for (let i = overviewData.length - 1; i >= 0 && overviewData[i].expenseCount == 0; i--)
                length--;
            updateChart(_.take(overviewData, length));
        } else if ($('#range-firstbalance', location).parent().checkbox('is checked')) {
            updateChart(overviewData);
        } else {
            // Custom range: keep only rows inside [startDate, endDate].
            updateChart(_.filter(overviewData, x => moment(x.endDate).isBetween(startDate, endDate, 'day', '[]')));
        }
    } else {
        let sD = startDate;
        let eD = endDate;
        if (!$('#range-custom', location).parent().checkbox('is checked') && typeof overviewData !== 'undefined') {
            // Derive the range from the overview table (rows are newest
            // first), optionally trimming expense-free rows at the end.
            eD = moment(overviewData[0].endDate);
            let length = overviewData.length;
            if ($('#range-firstexpense', location).parent().checkbox('is checked')) {
                for (let i = overviewData.length - 1; i >= 0 && overviewData[i].expenseCount == 0; i--)
                    length--;
            }
            sD = moment(overviewData[length - 1].endDate);
        }
        // Points per month for the requested interval.
        let ppm_ = 1;
        let id = $('#interval', location).find('.checked').children().attr('id');
        switch (id) {
            case 'interval-monthly': // should use ovData, but okay ...
                ppm_ = 1;
                break;
            case 'interval-twoweekly':
                ppm_ = 2;
                break;
            case 'interval-weekly':
                ppm_ = 4;
                break;
            case 'interval-biweekly':
                ppm_ = 8;
                break;
            case 'interval-daily':
                ppm_ = 30;
                break;
            default:
                Util.errorMsg('Invalid interval, id=' + id);
        }
        Api.getComputeSimulations('Both', sD.toISOString(), eD.toISOString(), ppm_, function(e) {
            // Only commit ppm once the request succeeded, so the tooltip
            // formatter never sees a value the chart doesn't match.
            ppm = ppm_;
            updateChart(e);
        }, function(e) {
            Util.errorMsg('Anfragen der berechneten Kontostände schlug fehl.', e);
        });
    }
}
|
<reponame>R-ripDao/protocol-metric-subgraph<filename>generated/sRipProtocol/RipProtocolStaking.ts
// THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
import {
ethereum,
JSONValue,
TypedMap,
Entity,
Bytes,
Address,
BigInt
} from "@graphprotocol/graph-ts";
export class AuthorityUpdated extends ethereum.Event {
get params(): AuthorityUpdated__Params {
return new AuthorityUpdated__Params(this);
}
}
export class AuthorityUpdated__Params {
_event: AuthorityUpdated;
constructor(event: AuthorityUpdated) {
this._event = event;
}
get authority(): Address {
return this._event.parameters[0].value.toAddress();
}
}
export class DistributorSet extends ethereum.Event {
get params(): DistributorSet__Params {
return new DistributorSet__Params(this);
}
}
export class DistributorSet__Params {
_event: DistributorSet;
constructor(event: DistributorSet) {
this._event = event;
}
get distributor(): Address {
return this._event.parameters[0].value.toAddress();
}
}
export class WarmupSet extends ethereum.Event {
get params(): WarmupSet__Params {
return new WarmupSet__Params(this);
}
}
export class WarmupSet__Params {
_event: WarmupSet;
constructor(event: WarmupSet) {
this._event = event;
}
get warmup(): BigInt {
return this._event.parameters[0].value.toBigInt();
}
}
// NOTE(review): this file is autogenerated ("DO NOT EDIT" header above);
// comments here will be lost on regeneration — prefer changing the generator.
// Tuple-shaped return value of RipProtocolStaking.epoch(): four uint256
// components, exposed positionally as value0..value3.
export class RipProtocolStaking__epochResult {
  value0: BigInt;
  value1: BigInt;
  value2: BigInt;
  value3: BigInt;

  constructor(value0: BigInt, value1: BigInt, value2: BigInt, value3: BigInt) {
    this.value0 = value0;
    this.value1 = value1;
    this.value2 = value2;
    this.value3 = value3;
  }

  // Re-encode the tuple as an ethereum.Value map keyed "value0".."value3".
  toMap(): TypedMap<string, ethereum.Value> {
    let map = new TypedMap<string, ethereum.Value>();
    map.set("value0", ethereum.Value.fromUnsignedBigInt(this.value0));
    map.set("value1", ethereum.Value.fromUnsignedBigInt(this.value1));
    map.set("value2", ethereum.Value.fromUnsignedBigInt(this.value2));
    map.set("value3", ethereum.Value.fromUnsignedBigInt(this.value3));
    return map;
  }
}
// NOTE(review): autogenerated file — comments will be lost on regeneration.
// Tuple-shaped return value of warmupInfo(address): three uint256 components
// plus a bool, per the ABI signature "(uint256,uint256,uint256,bool)".
export class RipProtocolStaking__warmupInfoResult {
  value0: BigInt;
  value1: BigInt;
  value2: BigInt;
  value3: boolean;

  constructor(value0: BigInt, value1: BigInt, value2: BigInt, value3: boolean) {
    this.value0 = value0;
    this.value1 = value1;
    this.value2 = value2;
    this.value3 = value3;
  }

  // Re-encode the tuple as an ethereum.Value map keyed "value0".."value3".
  toMap(): TypedMap<string, ethereum.Value> {
    let map = new TypedMap<string, ethereum.Value>();
    map.set("value0", ethereum.Value.fromUnsignedBigInt(this.value0));
    map.set("value1", ethereum.Value.fromUnsignedBigInt(this.value1));
    map.set("value2", ethereum.Value.fromUnsignedBigInt(this.value2));
    map.set("value3", ethereum.Value.fromBoolean(this.value3));
    return map;
  }
}
export class RipProtocolStaking extends ethereum.SmartContract {
static bind(address: Address): RipProtocolStaking {
return new RipProtocolStaking("RipProtocolStaking", address);
}
RIP(): Address {
let result = super.call("RIP", "RIP():(address)", []);
return result[0].toAddress();
}
try_RIP(): ethereum.CallResult<Address> {
let result = super.tryCall("RIP", "RIP():(address)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toAddress());
}
authority(): Address {
let result = super.call("authority", "authority():(address)", []);
return result[0].toAddress();
}
try_authority(): ethereum.CallResult<Address> {
let result = super.tryCall("authority", "authority():(address)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toAddress());
}
claim(_to: Address, _rebasing: boolean): BigInt {
let result = super.call("claim", "claim(address,bool):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromBoolean(_rebasing)
]);
return result[0].toBigInt();
}
try_claim(_to: Address, _rebasing: boolean): ethereum.CallResult<BigInt> {
let result = super.tryCall("claim", "claim(address,bool):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromBoolean(_rebasing)
]);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
distributor(): Address {
let result = super.call("distributor", "distributor():(address)", []);
return result[0].toAddress();
}
try_distributor(): ethereum.CallResult<Address> {
let result = super.tryCall("distributor", "distributor():(address)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toAddress());
}
epoch(): RipProtocolStaking__epochResult {
let result = super.call(
"epoch",
"epoch():(uint256,uint256,uint256,uint256)",
[]
);
return new RipProtocolStaking__epochResult(
result[0].toBigInt(),
result[1].toBigInt(),
result[2].toBigInt(),
result[3].toBigInt()
);
}
try_epoch(): ethereum.CallResult<RipProtocolStaking__epochResult> {
let result = super.tryCall(
"epoch",
"epoch():(uint256,uint256,uint256,uint256)",
[]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(
new RipProtocolStaking__epochResult(
value[0].toBigInt(),
value[1].toBigInt(),
value[2].toBigInt(),
value[3].toBigInt()
)
);
}
forfeit(): BigInt {
let result = super.call("forfeit", "forfeit():(uint256)", []);
return result[0].toBigInt();
}
try_forfeit(): ethereum.CallResult<BigInt> {
let result = super.tryCall("forfeit", "forfeit():(uint256)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
gRIP(): Address {
let result = super.call("gRIP", "gRIP():(address)", []);
return result[0].toAddress();
}
try_gRIP(): ethereum.CallResult<Address> {
let result = super.tryCall("gRIP", "gRIP():(address)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toAddress());
}
index(): BigInt {
let result = super.call("index", "index():(uint256)", []);
return result[0].toBigInt();
}
try_index(): ethereum.CallResult<BigInt> {
let result = super.tryCall("index", "index():(uint256)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
rebase(): BigInt {
let result = super.call("rebase", "rebase():(uint256)", []);
return result[0].toBigInt();
}
try_rebase(): ethereum.CallResult<BigInt> {
let result = super.tryCall("rebase", "rebase():(uint256)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
sRIP(): Address {
let result = super.call("sRIP", "sRIP():(address)", []);
return result[0].toAddress();
}
try_sRIP(): ethereum.CallResult<Address> {
let result = super.tryCall("sRIP", "sRIP():(address)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toAddress());
}
secondsToNextEpoch(): BigInt {
let result = super.call(
"secondsToNextEpoch",
"secondsToNextEpoch():(uint256)",
[]
);
return result[0].toBigInt();
}
try_secondsToNextEpoch(): ethereum.CallResult<BigInt> {
let result = super.tryCall(
"secondsToNextEpoch",
"secondsToNextEpoch():(uint256)",
[]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
stake(
_to: Address,
_amount: BigInt,
_rebasing: boolean,
_claim: boolean
): BigInt {
let result = super.call(
"stake",
"stake(address,uint256,bool,bool):(uint256)",
[
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount),
ethereum.Value.fromBoolean(_rebasing),
ethereum.Value.fromBoolean(_claim)
]
);
return result[0].toBigInt();
}
try_stake(
_to: Address,
_amount: BigInt,
_rebasing: boolean,
_claim: boolean
): ethereum.CallResult<BigInt> {
let result = super.tryCall(
"stake",
"stake(address,uint256,bool,bool):(uint256)",
[
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount),
ethereum.Value.fromBoolean(_rebasing),
ethereum.Value.fromBoolean(_claim)
]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
supplyInWarmup(): BigInt {
let result = super.call("supplyInWarmup", "supplyInWarmup():(uint256)", []);
return result[0].toBigInt();
}
try_supplyInWarmup(): ethereum.CallResult<BigInt> {
let result = super.tryCall(
"supplyInWarmup",
"supplyInWarmup():(uint256)",
[]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
unstake(
_to: Address,
_amount: BigInt,
_trigger: boolean,
_rebasing: boolean
): BigInt {
let result = super.call(
"unstake",
"unstake(address,uint256,bool,bool):(uint256)",
[
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount),
ethereum.Value.fromBoolean(_trigger),
ethereum.Value.fromBoolean(_rebasing)
]
);
return result[0].toBigInt();
}
try_unstake(
_to: Address,
_amount: BigInt,
_trigger: boolean,
_rebasing: boolean
): ethereum.CallResult<BigInt> {
let result = super.tryCall(
"unstake",
"unstake(address,uint256,bool,bool):(uint256)",
[
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount),
ethereum.Value.fromBoolean(_trigger),
ethereum.Value.fromBoolean(_rebasing)
]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
unwrap(_to: Address, _amount: BigInt): BigInt {
let result = super.call("unwrap", "unwrap(address,uint256):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount)
]);
return result[0].toBigInt();
}
try_unwrap(_to: Address, _amount: BigInt): ethereum.CallResult<BigInt> {
let result = super.tryCall("unwrap", "unwrap(address,uint256):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount)
]);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
warmupInfo(param0: Address): RipProtocolStaking__warmupInfoResult {
let result = super.call(
"warmupInfo",
"warmupInfo(address):(uint256,uint256,uint256,bool)",
[ethereum.Value.fromAddress(param0)]
);
return new RipProtocolStaking__warmupInfoResult(
result[0].toBigInt(),
result[1].toBigInt(),
result[2].toBigInt(),
result[3].toBoolean()
);
}
try_warmupInfo(
param0: Address
): ethereum.CallResult<RipProtocolStaking__warmupInfoResult> {
let result = super.tryCall(
"warmupInfo",
"warmupInfo(address):(uint256,uint256,uint256,bool)",
[ethereum.Value.fromAddress(param0)]
);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(
new RipProtocolStaking__warmupInfoResult(
value[0].toBigInt(),
value[1].toBigInt(),
value[2].toBigInt(),
value[3].toBoolean()
)
);
}
warmupPeriod(): BigInt {
let result = super.call("warmupPeriod", "warmupPeriod():(uint256)", []);
return result[0].toBigInt();
}
try_warmupPeriod(): ethereum.CallResult<BigInt> {
let result = super.tryCall("warmupPeriod", "warmupPeriod():(uint256)", []);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
wrap(_to: Address, _amount: BigInt): BigInt {
let result = super.call("wrap", "wrap(address,uint256):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount)
]);
return result[0].toBigInt();
}
try_wrap(_to: Address, _amount: BigInt): ethereum.CallResult<BigInt> {
let result = super.tryCall("wrap", "wrap(address,uint256):(uint256)", [
ethereum.Value.fromAddress(_to),
ethereum.Value.fromUnsignedBigInt(_amount)
]);
if (result.reverted) {
return new ethereum.CallResult();
}
let value = result.value;
return ethereum.CallResult.fromValue(value[0].toBigInt());
}
}
export class ConstructorCall extends ethereum.Call {
get inputs(): ConstructorCall__Inputs {
return new ConstructorCall__Inputs(this);
}
get outputs(): ConstructorCall__Outputs {
return new ConstructorCall__Outputs(this);
}
}
export class ConstructorCall__Inputs {
_call: ConstructorCall;
constructor(call: ConstructorCall) {
this._call = call;
}
get _rip(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _sRIP(): Address {
return this._call.inputValues[1].value.toAddress();
}
get _gRIP(): Address {
return this._call.inputValues[2].value.toAddress();
}
get _epochLength(): BigInt {
return this._call.inputValues[3].value.toBigInt();
}
get _firstEpochNumber(): BigInt {
return this._call.inputValues[4].value.toBigInt();
}
get _firstEpochTime(): BigInt {
return this._call.inputValues[5].value.toBigInt();
}
get _authority(): Address {
return this._call.inputValues[6].value.toAddress();
}
}
export class ConstructorCall__Outputs {
_call: ConstructorCall;
constructor(call: ConstructorCall) {
this._call = call;
}
}
export class ClaimCall extends ethereum.Call {
get inputs(): ClaimCall__Inputs {
return new ClaimCall__Inputs(this);
}
get outputs(): ClaimCall__Outputs {
return new ClaimCall__Outputs(this);
}
}
export class ClaimCall__Inputs {
_call: ClaimCall;
constructor(call: ClaimCall) {
this._call = call;
}
get _to(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _rebasing(): boolean {
return this._call.inputValues[1].value.toBoolean();
}
}
export class ClaimCall__Outputs {
_call: ClaimCall;
constructor(call: ClaimCall) {
this._call = call;
}
get value0(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class ForfeitCall extends ethereum.Call {
get inputs(): ForfeitCall__Inputs {
return new ForfeitCall__Inputs(this);
}
get outputs(): ForfeitCall__Outputs {
return new ForfeitCall__Outputs(this);
}
}
export class ForfeitCall__Inputs {
_call: ForfeitCall;
constructor(call: ForfeitCall) {
this._call = call;
}
}
export class ForfeitCall__Outputs {
_call: ForfeitCall;
constructor(call: ForfeitCall) {
this._call = call;
}
get value0(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class RebaseCall extends ethereum.Call {
get inputs(): RebaseCall__Inputs {
return new RebaseCall__Inputs(this);
}
get outputs(): RebaseCall__Outputs {
return new RebaseCall__Outputs(this);
}
}
export class RebaseCall__Inputs {
_call: RebaseCall;
constructor(call: RebaseCall) {
this._call = call;
}
}
export class RebaseCall__Outputs {
_call: RebaseCall;
constructor(call: RebaseCall) {
this._call = call;
}
get value0(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class SetAuthorityCall extends ethereum.Call {
get inputs(): SetAuthorityCall__Inputs {
return new SetAuthorityCall__Inputs(this);
}
get outputs(): SetAuthorityCall__Outputs {
return new SetAuthorityCall__Outputs(this);
}
}
export class SetAuthorityCall__Inputs {
_call: SetAuthorityCall;
constructor(call: SetAuthorityCall) {
this._call = call;
}
get _newAuthority(): Address {
return this._call.inputValues[0].value.toAddress();
}
}
export class SetAuthorityCall__Outputs {
_call: SetAuthorityCall;
constructor(call: SetAuthorityCall) {
this._call = call;
}
}
export class SetDistributorCall extends ethereum.Call {
get inputs(): SetDistributorCall__Inputs {
return new SetDistributorCall__Inputs(this);
}
get outputs(): SetDistributorCall__Outputs {
return new SetDistributorCall__Outputs(this);
}
}
export class SetDistributorCall__Inputs {
_call: SetDistributorCall;
constructor(call: SetDistributorCall) {
this._call = call;
}
get _distributor(): Address {
return this._call.inputValues[0].value.toAddress();
}
}
export class SetDistributorCall__Outputs {
_call: SetDistributorCall;
constructor(call: SetDistributorCall) {
this._call = call;
}
}
export class SetWarmupLengthCall extends ethereum.Call {
get inputs(): SetWarmupLengthCall__Inputs {
return new SetWarmupLengthCall__Inputs(this);
}
get outputs(): SetWarmupLengthCall__Outputs {
return new SetWarmupLengthCall__Outputs(this);
}
}
export class SetWarmupLengthCall__Inputs {
_call: SetWarmupLengthCall;
constructor(call: SetWarmupLengthCall) {
this._call = call;
}
get _warmupPeriod(): BigInt {
return this._call.inputValues[0].value.toBigInt();
}
}
export class SetWarmupLengthCall__Outputs {
_call: SetWarmupLengthCall;
constructor(call: SetWarmupLengthCall) {
this._call = call;
}
}
export class StakeCall extends ethereum.Call {
get inputs(): StakeCall__Inputs {
return new StakeCall__Inputs(this);
}
get outputs(): StakeCall__Outputs {
return new StakeCall__Outputs(this);
}
}
export class StakeCall__Inputs {
_call: StakeCall;
constructor(call: StakeCall) {
this._call = call;
}
get _to(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _amount(): BigInt {
return this._call.inputValues[1].value.toBigInt();
}
get _rebasing(): boolean {
return this._call.inputValues[2].value.toBoolean();
}
get _claim(): boolean {
return this._call.inputValues[3].value.toBoolean();
}
}
export class StakeCall__Outputs {
_call: StakeCall;
constructor(call: StakeCall) {
this._call = call;
}
get value0(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class ToggleLockCall extends ethereum.Call {
get inputs(): ToggleLockCall__Inputs {
return new ToggleLockCall__Inputs(this);
}
get outputs(): ToggleLockCall__Outputs {
return new ToggleLockCall__Outputs(this);
}
}
export class ToggleLockCall__Inputs {
_call: ToggleLockCall;
constructor(call: ToggleLockCall) {
this._call = call;
}
}
export class ToggleLockCall__Outputs {
_call: ToggleLockCall;
constructor(call: ToggleLockCall) {
this._call = call;
}
}
export class UnstakeCall extends ethereum.Call {
get inputs(): UnstakeCall__Inputs {
return new UnstakeCall__Inputs(this);
}
get outputs(): UnstakeCall__Outputs {
return new UnstakeCall__Outputs(this);
}
}
export class UnstakeCall__Inputs {
_call: UnstakeCall;
constructor(call: UnstakeCall) {
this._call = call;
}
get _to(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _amount(): BigInt {
return this._call.inputValues[1].value.toBigInt();
}
get _trigger(): boolean {
return this._call.inputValues[2].value.toBoolean();
}
get _rebasing(): boolean {
return this._call.inputValues[3].value.toBoolean();
}
}
export class UnstakeCall__Outputs {
_call: UnstakeCall;
constructor(call: UnstakeCall) {
this._call = call;
}
get amount_(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class UnwrapCall extends ethereum.Call {
get inputs(): UnwrapCall__Inputs {
return new UnwrapCall__Inputs(this);
}
get outputs(): UnwrapCall__Outputs {
return new UnwrapCall__Outputs(this);
}
}
export class UnwrapCall__Inputs {
_call: UnwrapCall;
constructor(call: UnwrapCall) {
this._call = call;
}
get _to(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _amount(): BigInt {
return this._call.inputValues[1].value.toBigInt();
}
}
export class UnwrapCall__Outputs {
_call: UnwrapCall;
constructor(call: UnwrapCall) {
this._call = call;
}
get sBalance_(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
export class WrapCall extends ethereum.Call {
get inputs(): WrapCall__Inputs {
return new WrapCall__Inputs(this);
}
get outputs(): WrapCall__Outputs {
return new WrapCall__Outputs(this);
}
}
export class WrapCall__Inputs {
_call: WrapCall;
constructor(call: WrapCall) {
this._call = call;
}
get _to(): Address {
return this._call.inputValues[0].value.toAddress();
}
get _amount(): BigInt {
return this._call.inputValues[1].value.toBigInt();
}
}
export class WrapCall__Outputs {
_call: WrapCall;
constructor(call: WrapCall) {
this._call = call;
}
get gBalance_(): BigInt {
return this._call.outputValues[0].value.toBigInt();
}
}
|
// Parse an HTML snippet and return { class, href } of the first <a> element
// whose trimmed text content equals anchorText, or null when none matches.
function extractAttributes(htmlSnippet, anchorText) {
  const document_ = new DOMParser().parseFromString(htmlSnippet, 'text/html');
  for (const candidate of document_.querySelectorAll('a')) {
    if (candidate.textContent.trim() === anchorText) {
      return {
        class: candidate.getAttribute('class'),
        href: candidate.getAttribute('href'),
      };
    }
  }
  // No anchor with the requested text was found.
  return null;
}
|
package luohuayu.anticheat.plugin;
import org.bukkit.Bukkit;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
import java.io.File;
/**
 * Console-only executor for the CatAntiCheat "/cac" command.
 * Subcommands: setmodlist, screenshot, modlist, reload; anything else
 * prints the usage help. All user-facing messages are Chinese and are
 * deliberately left untouched (they are runtime strings).
 */
public class CommandHandler implements CommandExecutor {
    @Override
    public boolean onCommand(CommandSender sender, Command command, String s, String[] args) {
        // Only the server console may run these commands.
        if (!sender.equals(Bukkit.getConsoleSender())) {
            sender.sendMessage("指令只能由后台执行!");
            return false;
        }
        if (args.length >= 1) {
            if ("setmodlist".equals(args[0]) && args.length >= 2) {
                // Adopt the named online player's mod list as the reference
                // list and persist it via the mod-data manager.
                Player player = Bukkit.getPlayer(args[1]);
                if (player != null) {
                    PlayerHandler playerHandler = PlayerManager.getPlayerData(player);
                    if (playerHandler != null && !playerHandler.getModList().isEmpty()) {
                        CatAntiCheat.plugin.modDataManager.setModList(playerHandler.getModList());
                        if (CatAntiCheat.plugin.modDataManager.save()) {
                            sender.sendMessage("设置MOD列表成功");
                        } else {
                            sender.sendMessage("无法保存MOD列表");
                        }
                        return true;
                    }
                }
                // Player offline or no mod list reported.
                sender.sendMessage("无法获取玩家MOD列表, 请确认玩家在线");
                return true;
            } else if ("screenshot".equals(args[0]) && args.length >= 2) {
                // Request a screenshot from the named player's client.
                Player player = Bukkit.getPlayer(args[1]);
                if (player != null) {
                    PlayerHandler playerHandler = PlayerManager.getPlayerData(player);
                    if (playerHandler != null) {
                        playerHandler.sendScreenshot();
                        sender.sendMessage("已向客户端发送截图请求");
                        return true;
                    }
                }
                sender.sendMessage("截图玩家失败, 请确认玩家在线");
                return true;
            } else if ("modlist".equals(args[0]) && args.length >= 2) {
                // Print the named player's reported mod list, one per line.
                Player player = Bukkit.getPlayer(args[1]);
                if (player != null) {
                    PlayerHandler playerHandler = PlayerManager.getPlayerData(player);
                    if (playerHandler != null && !playerHandler.getModList().isEmpty()) {
                        sender.sendMessage(player.getName() + " 的MOD列表:");
                        for (String mod : playerHandler.getModList()) {
                            sender.sendMessage(mod);
                        }
                        return true;
                    }
                }
                sender.sendMessage("无法获取玩家MOD列表, 请确认玩家在线");
                return true;
            } else if ("reload".equals(args[0])) {
                // Re-read config.yml and reload the persisted mod list file.
                CatAntiCheat.plugin.reloadConfig();
                CatAntiCheat.config = new Config(CatAntiCheat.plugin.getConfig());
                CatAntiCheat.plugin.modDataManager = new ModDataManager(new File(CatAntiCheat.plugin.getDataFolder(), "modlist.yml"));
                sender.sendMessage("配置文件重置成功");
                return true;
            }
        }
        // No (or unknown) subcommand: print usage help.
        sender.sendMessage("========== CatAntiCheat ==========");
        sender.sendMessage("/cac setmodlist <玩家> --- 设置MOD列表为该玩家的列表");
        sender.sendMessage("/cac screenshot <玩家> --- 截图该玩家");
        sender.sendMessage("/cac modlist <玩家> --- 查看该玩家的MOD列表");
        sender.sendMessage("/cac reload --- 重置配置文件和MOD列表");
        sender.sendMessage("==================================");
        return true;
    }
}
|
# Termux build recipe for gifski (variables consumed by the Termux build system).
TERMUX_PKG_HOMEPAGE=https://gif.ski/
TERMUX_PKG_DESCRIPTION="GIF encoder based on libimagequant"
# NOTE(review): Termux convention elsewhere spells this "AGPL-3.0" — confirm
# the expected license identifier before changing it.
TERMUX_PKG_LICENSE="AGPL-V3"
TERMUX_PKG_VERSION=0.10.4
# Release tarball for the tagged version; the SHA256 pins the exact archive.
TERMUX_PKG_SRCURL=https://github.com/ImageOptim/gifski/archive/$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=0fd4b6beb880bb7719a3fb707f8f42678a62c8cf9bbb90f369f043864bbcc5ed
# Build inside the source directory instead of a separate build dir.
TERMUX_PKG_BUILD_IN_SRC=true
|
<reponame>altmetric/abstractifier<filename>lib/abstractifier.rb<gh_stars>0
# encoding: UTF-8
# Builds short abstracts from longer text: accumulates whole sentences until
# a minimum length is reached, then hard-truncates anything past the maximum.
class Abstractifier
  # Character bounds used when no explicit :min / :max option is given.
  DEFAULT_MINIMUM_LENGTH = 80
  DEFAULT_MAXIMUM_LENGTH = 250

  attr_accessor :max_length, :min_length, :elider

  # options: :min and :max (character bounds), :elider (truncation marker).
  def initialize(options = {})
    @min_length = options.fetch(:min, DEFAULT_MINIMUM_LENGTH)
    @max_length = options.fetch(:max, DEFAULT_MAXIMUM_LENGTH)
    @elider     = options.fetch(:elider, '…')
  end

  # Return an abstract of +string+: whole sentences up to at least
  # min_length characters, truncated (and elided) beyond max_length.
  def abstractify(string)
    abstract = ''
    extract_sentences(string).each do |sentence|
      abstract += "#{sentence}. "
      break unless abstract.length < min_length
    end
    abstract = forcibly_truncate(abstract) if abstract.length > max_length
    tidy(abstract)
  end

  private

  # Cut at max_length without splitting the final word, then repair the
  # trailing punctuation.
  def forcibly_truncate(string)
    kept = string[0, max_length + 1].strip.split(/\s\b\w+$/).first
    strip_trailing_punctuation(kept)
  end

  # Normalise whitespace and split the text on sentence-ending full stops.
  def extract_sentences(string)
    string.gsub(/[[:space:]]+/, ' ').split(/\.(?:\s|$)/)
  end

  # Keep sentence-final punctuation as-is; replace any other trailing
  # punctuation (or a bare word ending) with the elider.
  def strip_trailing_punctuation(string)
    case string[-1]
    when /[\.\?\!]/ then string
    when /[[:punct:]]/ then string[0..-2] + elider
    else string + elider
    end
  end

  # Collapse whitespace and pull spaces back off ',' and '.' before stripping.
  def tidy(string)
    string.gsub(/[[:space:]]+/, ' ').gsub(/[[:space:]](,|\.)/, '\1').strip
  end
end
|
class MigrationOperation:
    """A single named migration step that can be executed on demand."""

    def __init__(self, name):
        # Human-readable identifier for this operation.
        self.name = name

    def execute(self):
        # Simulated execution: a real system would apply a schema change here.
        print("Executing migration operation: {}".format(self.name))
class MigrationSystem:
    """Collects migration operations and runs them in insertion order."""

    def __init__(self):
        # Pending operations; executed FIFO by execute_migrations().
        self.operations = []

    def add_migration(self, operation):
        """Queue *operation* for later execution."""
        self.operations.append(operation)

    def execute_migrations(self):
        """Run every queued operation, oldest first."""
        for pending in self.operations:
            pending.execute()
# Example usage: queue two table-creation migrations and run them in order.
if __name__ == "__main__":
    system = MigrationSystem()
    for operation_name in ("CreateTable1", "CreateTable2"):
        system.add_migration(MigrationOperation(operation_name))
    system.execute_migrations()
|
#!/bin/bash
# Evaluate a ctrl_vilbert NLVR2 checkpoint on the machine-translated MaRVL
# Swahili test split (task 12 in the volta task config).
TASK=12
MODEL=ctrl_vilbert
MODEL_CONFIG=ctrl_vilbert_base
TASKS_CONFIG=ctrl_test_marvl
TRTASK=NLVR2
TETASK=MaRVLsw
# Machine-translated annotations and pre-extracted image features.
TEXT_PATH=/home/projects/ku_00062/data/marvl/annotations_machine-translate/marvl-sw_gmt.jsonl
FEAT_PATH=/home/projects/ku_00062/data/marvl/features/marvl-sw_boxes36.lmdb
PRETRAINED=/home/projects/ku_00062/checkpoints/marvl/${MODEL}/${TRTASK}_${MODEL_CONFIG}/pytorch_model_best.bin
OUTPUT_DIR=/home/projects/ku_00062/results/marvl/${MODEL}/${TRTASK}_${MODEL_CONFIG}/$TETASK-en_mt/test

source /home/projects/ku_00062/envs/marvl/bin/activate

cd ../../../volta
python eval_task.py \
  --bert_model /home/projects/ku_00062/huggingface/bert-base-uncased \
  --config_file config/${MODEL_CONFIG}.json \
  --from_pretrained ${PRETRAINED} --do_lower_case \
  --val_annotations_jsonpath ${TEXT_PATH} --val_features_lmdbpath ${FEAT_PATH} \
  --tasks_config_file config_tasks/${TASKS_CONFIG}.yml --task $TASK --split test \
  --output_dir ${OUTPUT_DIR}
# BUG FIX: the --output_dir line previously ended with a trailing backslash,
# which made `deactivate` a positional argument to eval_task.py instead of a
# shell command; the continuation has been removed so deactivate runs.
deactivate
|
<reponame>noear/solon_demo<gh_stars>1-10
package demo_sessionstate_jwt.controller;
import org.noear.solon.annotation.Controller;
import org.noear.solon.annotation.Mapping;
import org.noear.solon.core.handle.Context;
/**
 * Session-state demo controller: stores the user name in the session on a
 * successful login and clears the whole session on logout.
 *
 * @author noear 2021/8/6 created
 */
@Controller
public class LoginController {
    /**
     * Login: only the hard-coded user "noear" is accepted; on success the
     * name is persisted in the session under "user_name".
     * */
    @Mapping("/login")
    public void login(Context ctx, String name) {
        if ("noear".equals(name)) {
            // Login succeeded — remember the user in the session.
            ctx.sessionSet("user_name", name);
        }
    }
    /**
     * Logout: drop all session state.
     * */
    @Mapping("/logout")
    public void logout(Context ctx) {
        ctx.sessionClear();
    }
}
|
import cx from 'classnames'
import { connect } from 'react-redux'
import { MODEL, fields, Field, actions, selectors } from './model.js'
import { Error, Save, Ok, Sync, Circle } from 'components/Icons'
import { getRoutePayload, toRoute } from 'prodsys/ducks/router'
import './StoryTable.scss'
// One clickable cell: renders the project Field component with its label
// hidden and surfaced as a tooltip (looked up from the `fields` metadata).
// `saveChanges` and `isDirty` are destructured out so they don't leak onto
// the Field via the props spread.
const TableCell = ({
  className,
  onClick,
  saveChanges,
  isDirty,
  name,
  ...props
}) => (
  <div
    title={R.path([name, 'label'], fields)}
    className={className}
    onClick={onClick}
  >
    <Field name={name} {...props} label="" />
  </div>
)
// Map a save-status to its [icon, tooltip] pair (tooltips are Norwegian).
// Returns undefined for unknown statuses, matching the original object lookup.
const iconAndTitle = (status, autoSave) => {
  switch (status) {
    case 'ok':
      return [Ok, 'endringer lagret']
    case 'dirty':
      return autoSave
        ? [Sync, 'saken lagres automatisk']
        : [Save, 'klikk for å lagre']
    case 'syncing':
      return [Sync, 'lagrer']
    case 'error':
      return [Error, 'feil']
    default:
      return undefined
  }
}
// Save-status icon for one story row. When the item is 'dirty' a click
// dispatches a save; otherwise the click falls through to the row handler.
let SaveIndicator = ({ onClick, autoSave, className, status, saveHandler }) => {
  const [Icon, title] = iconAndTitle(status, autoSave)
  return (
    <div
      title={title}
      onClick={status == 'dirty' ? saveHandler : onClick}
      className={cx('SaveIndicator', className, status)}
    >
      <Icon />
    </div>
  )
}
// Connect to the store: autosave flag and per-item status in, save action out.
SaveIndicator = connect(
  (state, { pk }) =>
    R.applySpec({
      autoSave: selectors.getAutosave,
      status: selectors.getItemStatus(pk),
    })(state),
  (dispatch, { pk }) => ({
    saveHandler: () => dispatch(actions.itemSave(pk, null)),
  }),
)(SaveIndicator)
// render all headers in table
// Fixed set of story cells for one row, plus the save indicator.
const DumbTableRow = props => (
  <>
    <TableCell {...props} name="working_title" />
    <TableCell {...props} name="publication_status" />
    <TableCell {...props} name="story_type" />
    <TableCell {...props} name="modified" relative />
    <TableCell {...props} name="image_count" />
    <SaveIndicator {...props} />
  </>
)
// Derive the cell class (publication status, zebra striping via row parity,
// selection) from the store; clicking navigates to this item's route.
const TableRow = connect(
  (state, { pk, row, selected, action }) => {
    const data = selectors.getItem(pk)(state) || {}
    const className = cx('TableCell', `status-${data.publication_status}`, {
      selected,
      odd: row % 2,
    })
    return { className, pk }
  },
  (dispatch, { pk, action }) => ({
    onClick: e => dispatch(toRoute({ model: MODEL, action, pk: pk })),
  }),
)(DumbTableRow)
// Table of story rows; the row whose pk matches the current route is
// rendered as selected.
const StoryTable = ({ action, currentItem, items = [] }) => (
  <section className="StoryTable">
    {items.map((pk, index) => (
      <TableRow
        key={pk}
        pk={pk}
        row={index}
        action={action}
        selected={pk == currentItem}
      />
    ))}
  </section>
)
// Map the route payload and item list from the store. On the 'list' route
// rows link to the 'change' action instead.
export default connect(state => {
  const { pk, action } = getRoutePayload(state)
  const items = selectors.getItemList(state)
  return {
    items,
    action: action == 'list' ? 'change' : action,
    currentItem: pk,
  }
})(StoryTable)
|
<filename>config.ru
# Rack entry point: load the local Pail library and serve Pail::Service.
require './lib/pail'
run Pail::Service
|
<filename>agent/cache_test.go
package agent
import (
"fmt"
"log"
"os"
"testing"
"time"
)
// getTenant builds a throwaway tenant value: one fixed key plus a key derived
// from the current time, so each test run uses a distinct tenant.
func getTenant() interface{} {
	tenant := map[string]string{"key1": "value1"}
	tenant["key2"] = fmt.Sprintf("%v", time.Now())
	return tenant
}
// TestLocalCache verifies that a cache miss invokes the loader exactly once
// and returns its value, and that lookups through a second cache handle for
// the same tenant are served without re-invoking the loader.
// NOTE(review): the second half implies the backing store is shared between
// newLocalCache instances for the same tenant — confirm against the
// newLocalCache contract.
func TestLocalCache(t *testing.T) {
	tenant := getTenant()
	cache := newLocalCache(tenant, cacheTimeout, true, log.New(os.Stdout, "", 0))
	loader := false
	result := cache.GetOrSet("MyKey01", false, func(i interface{}, s string) interface{} {
		loader = true
		return "hello world"
	})
	// First access: loader must run and its value must be returned.
	if !loader {
		t.Fatal("loader has not been executed.")
	}
	if result.(string) != "hello world" {
		t.Fatal("result was different than expected.")
	}
	// Repeated lookups through a fresh handle must all be cache hits.
	cache2 := newLocalCache(tenant, cacheTimeout, true, log.New(os.Stdout, "", 0))
	loader = false
	for i := 0; i < 10; i++ {
		result = cache2.GetOrSet("MyKey01", false, func(i interface{}, s string) interface{} {
			loader = true
			return "hello world"
		})
		if loader {
			t.Fatal("loader has been executed.")
		}
		if result.(string) != "hello world" {
			t.Fatal("result was different than expected.")
		}
	}
}
|
<filename>dolly_gazebo/launch/dolly.launch.py
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch Gazebo with a world that has Dolly, as well as the follow node."""
from launch import LaunchDescription
import launch.actions
import launch.substitutions
import launch_ros.actions
def generate_launch_description():
    """Launch gzserver/gzclient with the configured world plus the follow node."""
    # Gazebo server with the ROS init plugin; the world file comes from the
    # 'world' launch argument declared below.
    gzserver_exe = launch.actions.ExecuteProcess(
        cmd=['gzserver', '--verbose', '-s', 'libgazebo_ros_init.so',
             launch.substitutions.LaunchConfiguration('world')],
        output='screen'
    )
    # Gazebo GUI client.
    gzclient_exe = launch.actions.ExecuteProcess(
        cmd=['gzclient'],
        output='screen'
    )
    # Follow node, remapped onto Dolly's velocity and laser topics.
    follow = launch_ros.actions.Node(
        package='dolly_follow',
        node_executable='dolly_follow',
        output='screen',
        remappings=[
            ('cmd_vel', '/dolly/cmd_vel'),
            ('laser_scan', '/dolly/laser_scan')
        ]
    )
    return LaunchDescription([
        launch.actions.DeclareLaunchArgument(
            'world',
            default_value=['worlds/empty.world', ''],
            description='Gazebo world file'),
        gzserver_exe,
        gzclient_exe,
        follow
    ])
#!/bin/bash
# Build the IntelliJ + Java 8 image, timing the build. The `say` command is
# macOS text-to-speech, so this script assumes macOS.
time docker build -t kyledinh/intellij-java8 -f java8.Dockerfile .
say -v Karen All done. Building the in telli J. image.
|
#!/bin/bash
# Build the DArcher augur dapp image; the build context is the repository
# root two levels above this script's directory.
DIR=$( dirname "${BASH_SOURCE[0]}" )
IMAGE_NAME=darcherframework/augur:latest
DAPP_IMAGE_NAME=darcherframework/augur-dapp:latest
# The base augur image build is currently disabled:
#docker build -t $IMAGE_NAME -f "$DIR"/Dockerfile "$DIR"/../..
docker build -t $DAPP_IMAGE_NAME -f "$DIR"/dapp.Dockerfile "$DIR"/../..
|
<reponame>terablade2001/vp-cpp-template<filename>C++/Common/Common.cpp
#include <Common.hpp>
// Shared demo function: emits a debug trace via the dbg_ macro from Common.hpp
// and reports success (0).
int CommonFunction() {
  // NOTE(review): no trailing ';' — assumes dbg_ expands to a complete
  // statement; confirm against the macro definition in Common.hpp.
  dbg_(63,"CommonFunction called")
  return 0;
}
|
<gh_stars>10-100
/**
 * Compute the public path for the build.
 *
 * Production builds running on GitLab CI are served from the project pages
 * path; every other build uses the site root. If you want to use Gitlab
 * pages, adjust the path for your project.
 *
 * Please see this documentation https://cli.vuejs.org/ru/guide/deployment.html#gitlab-pages
 */
const gitlabPagePath = () => {
  const isProductionBuild = process.env.NODE_ENV === 'production';
  const runningOnCi = process.env.CI !== undefined;
  return isProductionBuild && runningOnCi ? '/frontend/vue-madboiler/' : '/';
};
// Vue CLI configuration: publicPath is resolved once at build time.
module.exports = {
  publicPath: gitlabPagePath()
};
|
#!/usr/bin/env bash
# Install files:
# Download the vagrant-latmp repository tarball and unpack it into the current
# directory. $1 (the target directory) is only used in the progress message —
# extraction happens in the CWD, which the caller has already validated.
function install() {
	echo "Installing into ${1} …"
	# Remote repo zip file:
	SOURCE_ZIP="https://github.com/mhulse/vagrant-latmp/tarball/master"
	# Get the zip file and extract all files, stripping the top-level folder and
	# skipping the installer itself, dotfiles, READMEs and the LICENSE:
	curl -sS -#L "$SOURCE_ZIP" | tar -xzv --strip-components 1 --exclude={install.sh,.*,README*,LICENSE}
	# Testing (comment out the above and run these lines instead):
	#SOURCE_ZIP="/Users/mhulse/Desktop/test.tar.gz"
	#tar --strip-components=1 -zxf $SOURCE_ZIP
	# Let the user know that we are done:
	echo $'\n'"Congrats! Installation was successful!"$'\n'
	# Open installation folder (macOS Finder):
	open "."
}
# Check if installation directory meets our requirements:
# Refuses to proceed when the directory exists and is non-empty; otherwise
# hands off to install().
function empty() {
	# Use `ls -A` if you want to account for hidden files:
	# "$1" is quoted everywhere — unquoted it word-splits and globs, breaking
	# on paths that contain spaces.
	if [ -d "$1" ] && [ "$(ls "$1")" ]; then
		# If chosen directory exists, and it’s not empty:
		echo "$1 must be an empty directory."
		echo "Remove files and try running this script again."
	else
		# Move on to the installation function:
		install "$1"
	fi
}
# Tidy up the terminal window:
clear
# Entry point: validate the current working directory and install into it
# when it is empty.
empty "$(pwd)"
# Exit program:
exit 0
# Done!
# For more information about this script, see:
# https://github.com/mhulse/install-scripts
|
<gh_stars>1-10
/**
* Copyright 2014 isandlaTech
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.psem2m.isolates.ui.admin.panels;
import java.awt.BorderLayout;
import java.awt.Font;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTextArea;
import org.psem2m.isolates.base.IIsolateLoggerSvc;
import org.psem2m.isolates.ui.admin.api.CJPanel;
import org.psem2m.isolates.ui.admin.api.EUiAdminFont;
/**
 * UI-admin panel that shows the isolate configuration as plain text inside a
 * scroll pane. The text is replaced wholesale via {@link #setText(String)}.
 *
 * @author ogattaz
 */
public class CJPanelConfiguration extends CJPanel {

    private static final long serialVersionUID = -5537324836085851473L;

    /** Text area holding the configuration text (updated by setText()). */
    private JTextArea pConfigTextArea;

    /** Scroll pane hosting pConfigTextArea. */
    private JScrollPane pConfigurationScrollPane;

    /**
     * Standalone constructor: builds the GUI without a logger or host panel.
     */
    public CJPanelConfiguration() {
        super();
        newGUI();
    }

    /**
     * Builds the panel and docks it in the center of the given host panel.
     *
     * @param aLogger the isolate logger handed to the base panel
     * @param aPanel  the host panel that receives this panel's GUI
     */
    public CJPanelConfiguration(final IIsolateLoggerSvc aLogger,
    final JPanel aPanel) {
        super(aLogger);
        aPanel.setLayout(new BorderLayout(0, 0));
        aPanel.add(newGUI(), BorderLayout.CENTER);
    }

    /*
     * (non-Javadoc)
     *
     * @see org.psem2m.isolates.ui.admin.panels.CJPanel#destroy()
     */
    @Override
    public void destroy() {
        // Nothing to release: this panel only holds Swing components.
    }

    /*
     * (non-Javadoc)
     *
     * @see org.psem2m.isolates.ui.admin.panels.CJPanel#newGUI()
     */
    @Override
    public JPanel newGUI() {
        setLayout(new BorderLayout(0, 0));
        {
            pConfigurationScrollPane = new JScrollPane();
            this.add(pConfigurationScrollPane, BorderLayout.CENTER);
            {
                pConfigTextArea = new JTextArea();
                // Default font plus a placeholder until real config arrives.
                setTextFont(EUiAdminFont.NORMAL);
                setText("Config ...");
                pConfigurationScrollPane.setViewportView(pConfigTextArea);
            }
        }
        return this;
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * org.psem2m.isolates.ui.admin.panels.CJPanel#setText(java.lang.String)
     */
    @Override
    public void setText(final String aText) {
        pConfigTextArea.setText(aText);
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * org.psem2m.isolates.ui.admin.panels.CJPanel#setTextFont(java.lang.String,
     * int)
     */
    @Override
    public Font setTextFont(final EUiAdminFont aUiAdminFont) {
        pConfigTextArea.setFont(aUiAdminFont.getTextFont());
        return aUiAdminFont.getTextFont();
    }
}
|
<?php
// Connect to SQL Server and print the first employee record.
$serverName = "example_server";
$connectionOptions = array(
    "Database" => "myDB",
    "Uid" => "example_username",
    "PWD" => "example_password"
);
//Establishes the connection
$conn = sqlsrv_connect($serverName, $connectionOptions);
if ($conn === false)
{
    die(FormatErrors(sqlsrv_errors()));
}
// An example SQL statement that returns employee data
$tsql = "SELECT Firstname, Lastname, Salary
      FROM Employees";
$getResults = sqlsrv_query($conn, $tsql);
// Message now names the table actually queried (was "cs262_Task7_Employees").
echo ("Reading data from table Employees" . PHP_EOL);
// Strict comparison: sqlsrv_query() returns false on failure; '==' would also
// treat other falsy values as an error.
if ($getResults === false)
{
    die(FormatErrors(sqlsrv_errors()));
}
$row = sqlsrv_fetch_array($getResults, SQLSRV_FETCH_ASSOC);
// sqlsrv_fetch_array() returns null when the result set is empty (and false
// on error) — guard before indexing into the row.
if ($row)
{
    echo("Employee Name: " . $row['Firstname'] . " " . $row['Lastname'] . "- Salary: $" . $row['Salary']);
}
sqlsrv_free_stmt($getResults);
// Release the connection explicitly.
sqlsrv_close($conn);
/**
 * Print every sqlsrv error record as an HTML fragment.
 *
 * @param array $errors error records as returned by sqlsrv_errors()
 */
function FormatErrors( $errors )
{
    // Header for the error dump.
    echo "Error information: <br/>";
    // Emit the three interesting fields of each record.
    foreach ( $errors as $error )
    {
        printf("SQLSTATE: %s<br/>", $error['SQLSTATE']);
        printf("Code: %s<br/>", $error['code']);
        printf("Message: %s<br/>", $error['message']);
    }
}
?>
|
#!/bin/bash
#If the installer fails, change #!/bin/bash to #!/bin/sh
# Installs the New Relic nri-apigee custom integration and restarts the
# Infrastructure agent under whichever init system is detected.
printf "Installing Apigee Integration...\n"
DEFINITION_PATH="/var/db/newrelic-infra/custom-integrations"
CONFIG_PATH="/etc/newrelic-infra/integrations.d"
SERVICE='newrelic-infra'
#check os release (informational only)
if [ -f /etc/os-release ]; then #amazon/redhat/fedora check
    . /etc/os-release
    OS=$NAME
    VERSION=$VERSION_ID
    printf "OS Name: $OS\n"
    printf "Version: $VERSION\n"
elif [ -f /etc/lsb-release ]; then #ubuntu/debian check
    . /etc/lsb-release
    OS=$DISTRIB_ID
    VERSION=$DISTRIB_RELEASE
    printf "OS Name: $OS\n"
    printf "Version: $VERSION\n"
fi
#check init system by inspecting PID 1
SYSCMD=""
initCmd=`ps -p 1 | grep init | awk '{print $4}'`
if [ "$initCmd" == "init" ]; then
    SYSCMD='upstart'
fi
sysdCmd=`ps -p 1 | grep systemd | awk '{print $4}'`
if [ "$sysdCmd" == "systemd" ]; then
    SYSCMD='systemd'
fi
#copy files [add more as needed]
printf "Copying files...\n"
cp nri-apigee "$DEFINITION_PATH"
cp nri-apigee_metrics-settings.yml "$DEFINITION_PATH"
cp nri-apigee-definition.yml "$DEFINITION_PATH"
cp nri-apigee-config.yml "$CONFIG_PATH"
printf "Script complete. Restarting Infrastructure Agent...\n"
# "$SYSCMD" must be quoted: unquoted, an empty value turned the test into the
# syntax error '[ == "systemd" ]' instead of a clean fallthrough.
if [ "$SYSCMD" == "systemd" ]; then
    systemctl restart $SERVICE
elif [ "$SYSCMD" == "upstart" ]; then
    initctl restart $SERVICE
else
    printf "Could not detect init system; please restart ${SERVICE} manually.\n"
fi
|
try:
import wpilib
except ImportError:
from pyfrc import wpilib
from common.autonomous_helper import StatefulAutonomous, timed_state
class TimedShootAutonomous(StatefulAutonomous):
    '''
    Tunable autonomous mode that does dumb time-based shooting
    decisions. Works consistently.
    '''

    # Not the default autonomous mode; selectable on the dashboard by name.
    DEFAULT = False
    MODE_NAME = "Timed shoot"

    def __init__(self, components):
        # components: injected robot subsystems (intake, catapult, drive) —
        # presumably exposed as attributes by StatefulAutonomous; TODO confirm.
        super().__init__(components)
        # Expose drive_speed as a tunable SmartDashboard variable (default 0.5).
        self.register_sd_var('drive_speed', 0.5)

    def on_disable(self):
        '''This function is called when autonomous mode is disabled'''
        pass

    def update(self, tm):
        # always keep the arm down
        self.intake.armDown()
        # After 0.3s into autonomous, hold the catapult down so it is armed.
        if tm > 0.3:
            self.catapult.pulldown()
        # Let the base state machine dispatch to the current timed state.
        super().update(tm)

    @timed_state(duration=1.2, next_state='drive', first=True)
    def drive_wait(self, tm, state_tm):
        '''Wait some period before we start driving'''
        pass

    @timed_state(duration=1.4, next_state='launch')
    def drive(self, tm, state_tm):
        '''Start the launch sequence! Drive slowly forward for N seconds'''
        # NOTE(review): self.drive is also this method's name — this only works
        # if an instance attribute 'drive' (the drive subsystem) shadows the
        # method; verify StatefulAutonomous/components actually set it.
        self.drive.move(0, self.drive_speed, 0)

    @timed_state(duration=1.0)
    def launch(self, tm):
        '''Finally, fire and keep firing for 1 seconds'''
        self.catapult.launchNoSensor()
|
#!/bin/bash
# Environment settings for the "ohs1" OHS component instance.
# On any command failure, report file, line, exit status, user and function.
trap 'echo "${BASH_SOURCE[0]}: line ${LINENO}: status ${?}: user ${USER}: func ${FUNCNAME[0]}"' ERR
# Abort on errors; errtrace makes the ERR trap fire inside functions/subshells too.
set -o errexit
set -o errtrace
export COMPONENT_NAME="ohs1"
# Admin interface is bound to loopback only.
export COMPONENT_ADMIN_LISTEN_ADDRESS="127.0.0.1"
export COMPONENT_ADMIN_LISTEN_PORT="9999"
# NOTE(review): empty address presumably means "bind all interfaces" — confirm
# against the component's documentation.
export COMPONENT_LISTEN_ADDRESS=""
export COMPONENT_LISTEN_PORT="7777"
export COMPONENT_SSL_LISTEN_PORT="4443"
|
import nltk
import numpy as np
import tensorflow as tf
from tensorflow import keras
# nltk.download('punkt')
# load dataset
with open('chatbot_dataset.txt', 'r', encoding='utf8', errors='ignore') as file:
    corpus = file.readlines()

# extract sentences and questions from the dataset
# Keep only lines shaped "question;answer" (exactly one ';').
data = []
for line in corpus:
    _line = line.split(';')
    if len(_line) == 2:
        data.append(line)

questions = []
answers = []
# partial data (subset of full data)
for line in data:
    _line = line.split(';')
    questions.append(_line[0])
    answers.append(_line[1])

# create word2vec embedding
tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(questions + answers)

# print out some info
vocab_size = len(tokenizer.word_index) + 1  # +1 for the reserved 0 index
print("Vocabulary size:", vocab_size)

# # store embedding of words
# embedding_matrix = np.zeros((vocab_size, 250))
# for word, i in tokenizer.word_index.items():
#     embedding_vector = embeddings_index.get(word)
#     if embedding_vector is not None:
#         embedding_matrix[i] = embedding_vector

# keras model
model = keras.Sequential()
# NOTE(review): input_length is the CHARACTER length of the first question —
# probably meant to be a padded token-sequence length; confirm.
model.add(keras.layers.Embedding(vocab_size, 32, input_length=len(questions[0])))
model.add(keras.layers.Bidirectional(keras.layers.LSTM(32)))
model.add(keras.layers.Dense(vocab_size, activation="softmax"))
model.compile("rmsprop", "categorical_crossentropy")

# training the model
# NOTE(review): fit() is given raw strings; Keras expects numeric arrays and
# the texts are never run through texts_to_sequences/padding — verify this
# actually trains before relying on it.
model.fit(questions, answers, epochs=400, batch_size=8)
# test the chatbot
def chatbot(sentence):
    """Return the single word the model predicts for ``sentence``.

    The sentence is tokenized with the shared ``tokenizer``, the model's
    class probabilities are reduced with argmax, and the winning index is
    mapped back to a word. Returns None when the index is not in the
    vocabulary (the original linear scan returned None implicitly too).
    """
    tokenized_sentence = tokenizer.texts_to_sequences([sentence])[0]
    # Model.predict_classes() was removed from Keras (TF >= 2.6); take the
    # argmax over predict() output instead. Wrap in an array to supply the
    # batch dimension that predict() expects.
    probabilities = model.predict(np.array([tokenized_sentence]), verbose=0)
    predicted_index = int(np.argmax(probabilities, axis=-1).flat[0])
    # Reverse-lookup via index_word instead of scanning word_index.
    return tokenizer.index_word.get(predicted_index)
# testing the chatbot
# Simple read–eval–print loop; type "exit" to quit.
while True:
    line = input("User: ")
    if line == "exit":
        print("Chatbot: Goodbye! :)")
        break
    print("Chatbot:", chatbot(line))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.